diff --git a/.build/ca.crt b/.build/ca.crt new file mode 100644 index 0000000000..e5a4081a02 --- /dev/null +++ b/.build/ca.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIUB/AJgMX+fmeXvBOUWW7WR+XKZ6AwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDExMjAwNDExMjFaFw0zNDEx +MTgwNDExMjFaMEUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQC8A5+//15VxRCxpHzl7srYx6uWQi1/7q5VFWFZab+7 +82PLr3pV/zMjSMEPBZdq46NWWNnXIFoFHd5MFnN4fNIQ1GIEsTF0kYy142qllnp3 +vLBVBu24n4dsmI8ygl8+1PuGwk45Mz+vOL+RjNIo6ra9yJzYFnZOGCqlt0kWkCau +HR/43ms0vhKq8FaDXPdVXn9Z3EZScxRKQwlfAKOUxLQ8dVkzvRuAm0PF74afRYfg +xiGIX8msFYKzGnWb7ezcag125iEqg+xSplo6QK6vaNURlKwYQ8ZRKz1Hk1oIB4t1 +iEJL2d4nzgTkh/jlVjtTXo6cw96WT9NBT0Rg6JR4PJySlhY+ZwLi6VAxQZ8GyJo4 +YTvx1K3vhXeokKjFTxUtZdx1blX5vCBXv9LCxnjAsBCTRzE425x6UP1gp721gHGW +sqopvkUgN9vk8oigyWLeGvwsBwFTFnY672iCYXhFHs2oKTIX8yo+A2xRr8tewb9C +IsqJSC6JkLs5zbVwKdgVx1H21Uwvi7XjKir9pPp/ks12r9GNMmWc265PK1kCqCHa +oHfgzYMVVFQ3CfYbeeA8/aVf770AfC/1v+VtMse8DEqyep5q0OzOXtWIQlahYiyA +FLTzCBqcHUuRZtS4gEhOk6/Pk1HP3faUC1xGgxO5c/pd7SVMfs+Z58WJbYGFcAlC ++QIDAQABo1MwUTAdBgNVHQ4EFgQUBeKaoc7AMURxdajJ+CF8YrUsdFgwHwYDVR0j +BBgwFoAUBeKaoc7AMURxdajJ+CF8YrUsdFgwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAgEAGGpFZm0c36Eh5E8QiAg8+8U22Ao+YoF6nJnIlc/ri1pt +J5zXRM2DbCCR9uN5yckmCNIJ4PZO49QBflYGPAkF+Vd0RJYoA4k1Cq+eYcJBWtXl +ESJxeg1QAKAZ4XSasOIijebWlPIZxPGOy8HquKNMDQIm8a7g5zSE4UNJPVY3y9on +zJT7ZhntIwuM8IP6h6gotJfxBHJRWNe/g0zVITQ7vHnxSpobLbuKfY21GLl6clgI +WsePKWWo/mZYquqZz72KBUJ66YX4X7nJCvZs1sLgMnXh87n9hsxAdFlRgLuQ4ztp +mwQbDZ90mJFQLprI4rfyamuloIgOcn05yXfklRAI2P8L2/yf5xNAy+ii0OHRiMVv +jnYUet8Bca1orh7OQ9ol1XTBoCI1gknrdG5Y2IQvQhWLiS5AjIwwQYwjkSFXELtF +X8v9Fv758RA9CFlQDnsp9awNjdLss/TdH6+dNYQfTNGigIPM6oCk5nrcQqF/533W +z2WM0LNHAiQlEn0X38D0wCuRwIVzPG/AFyfsf50vSlH81/uzpyR5q3SJA8OKiCV1 
+/OiW7Jv7pOtwqFjxR+m31TqaPM6PLrdasP/CNKSvGuJmtaHK4Wkc3YU9dbtQffzB +MUFwhi233gvE+nSEixse2KlzsrBVZIdz16bZXaAd20JQdq9Hceku2uVgfN1fycI= +-----END CERTIFICATE----- diff --git a/.build/server.crt b/.build/server.crt index d161ab2652..5a2bfc7b01 100644 --- a/.build/server.crt +++ b/.build/server.crt @@ -1,20 +1,30 @@ -----BEGIN CERTIFICATE----- -MIIDUjCCAjoCFAwuj6RwuZSjCGYHja8m9tbr3nFeMA0GCSqGSIb3DQEBCwUAMGgx -EzARBgNVBAoTCk15IENvbXBhbnkxCzAJBgNVBAsTAklUMRAwDgYDVQQHEwdNeSBU -b3duMQ8wDQYDVQQIEwZNb3Njb3cxCzAJBgNVBAYTAlJVMRQwEgYDVQQDEwtsb2Nh -bGhvc3RDQTAeFw0yMTA0MTAxMzA0MDBaFw0yMjA0MTAxMzA0MDBaMGMxEzARBgNV -BAoTCk15IENvbXBhbnkxCzAJBgNVBAsTAklUMRAwDgYDVQQHEwdNeSBUb3duMQ8w -DQYDVQQIEwZNb3Njb3cxCzAJBgNVBAYTAlJVMQ8wDQYDVQQDEwZzZXJ2ZXIwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8LoQbo2DFwC17gZwJ8xrPKHGX -UKxoo5UcyZ3/2zZ006TYkswssejKksuiICTMI89OD8n55pNTZkXPUH7oR2oIyxTY -SiWPiNzbEh0FOxH9Kh5gmajqM/4X44OaprmyQ56m4Y2LZO2nZ9hHoe+ZRoan3+pa -g8weOM/n/wYuXZtdElOxNsB8pg09K4gevHVaLaSBCEeQfHev51vClFdN3+orBi/r -hnQF3vdw7oMT1JSH75Ray51wRaypLIslAc2DcPFTCQJMmXXMTcAcxmjAVUGrfY+d -sSCdXnOZtd7yk+0X0bVGKLBkCTOP7QpmfOVu9bOhscDiK5EoAaDKqdHSMUfhAgMB -AAEwDQYJKoZIhvcNAQELBQADggEBAKCo2Y1uKbudA8JpV6yo35tc7Z6n03++BAdq -egUBKOiE4ze7xQ7lmlt572ptqXlU/8JuPWa2Qb/wGksR0HpVPTAeU3pbXz1dcCXC -A9wCtSxapjyCYbkDrDl2FQuK0OfJi0q71JZU66D58Qu0l45nWON30to9dSiw3zPw -Rdk7X86GHYIBHKsj7mjiy1v8jH1sXeWvThOmU6+rv8UY8VuJiu4MQDdYa0Y5KFh/ -OL3tVsi7zoNu2OXY1cTKuUpKMQPbO+WSdelYromYK2OAXaNqnC27GegPqvCFWJ2I -9NZuXYj3X+j0ydZSKVjDgCda8H68olBnO0zh44XirCBef7uTVLw= +MIIFJTCCAw0CFAKjNOhsMTYUuQngy2k291XuKOGGMA0GCSqGSIb3DQEBCwUAMEUx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl +cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjQxMTIwMDQxOTE0WhcNMjkxMTE5MDQx +OTE0WjBZMQswCQYDVQQGEwJVUzETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE +CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3Qw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDlGT9vXb93yoM1YT0GAxJI +B6/2ExUrdprd049oMVZa4Km0nqwN/xjVvQRIWozmbpvps0mCkFM1ZyL1iqZFwiJG 
+WcQvvIffFM1qKRMOSTLNPCbM9mfvRKsCU9gjgatdhy8xUZhz7uFGMGADnZdlNMYW +GgzMVZo0EyW7Z2QJ+ZCl8wW5IT4iswZWrJsNZU/g7HaNBrXiidDihkmQ8Kt32R0U +nqJeXMHwkQLxddmcGdDmVCKsAEUu3NcvPeAlSJsNHfGDRsf9fImRqZCsgwI8dJtA +ke/luMTttQ34aADFTmTbVk4ngVhCxgBkJ6FUDFJcp3t3nFssiisNon9k5FwtJ3hl +e/QGM9IRdBvGVcOnZZuXXK2lLtakj5UWUik2xWA0hjX+DsFo7TPwKgZy4zmWCRob +W1e1NX52bqYFWZUKYLqbizllOd98o3yed58PhbF1/IuVEuOoiKu7rNdNgzr8vgRP +pWHQNXp3maCcZq2kWybADU2LQNUKAZLSw3nClcX8QVRAfvf8IyDZ/280EYRGu99V +qLqDPLa1+3CNAb93J1ONvVjKgJwQQWy4dYFLHTYdBzXV5SOpH8YHL/1IHs9W5k28 +BdwbeMtJnOaV8rqiA6Xd4Xem111AMAigHExxG3kpSnAq6jiOX0+2V++f7qAunuC6 +B/oJATXLCbBQILr0ARtKuQIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQCn6R2fvxfs +R7nN9g6bVNJXkJrDJ+O1suVD0tkZzxZAIAFdhKnSFocJph1bC6bSZEQkhG+0WtfU +DU7m19VDHpZWZ+8LygIVikIkvj47v1/yl7TgwkhNAKXXxl6bF/AEevMUZoxT3r8S +UBFURp8QduSQ7sbDRB9qR1EWPjAXgnedzLSGkt5E6VKuVRwsTjv7QUTV8RCbOl9b +YHtTX3dtvr3PeAB5M3B6qrbpniqJfPxUt658UKrDGFr1MuZZ8ONYpdiGH8uGXZhs +9BBjp0g0xWha9LYDYRpqzlC1hqV0J/9jz9QdS9HHPsqa8PvB/YwaDGQm/RSRMUbU +x0wip0me45WU5pLD1djEGQBlxCGgQXIJsebzipdUsayA4MgY3s2lBj2qsPOqyNoP +dFohMm2+Ypi8UAjEbeGY4XsCODLeCvPx24HyjJUORm9uuPCunSBhtgiEBTJrNwHL +F7T1+/g9gVSwCsz4MqceO7IooJ2omSpwk7xrzocccFb1HGR/tE9GxRLNHiyTfx9s +FN9SNOih5DCcOFOiw0vF1qKHk6CAJ0UCBzVWl3YO9OgnFX4FbRYHd3PduWR+fSkd +icBs2AiOKPbOU8yXR8CE6uZiDoN6A27KOE07adZEWBMwd4us7uBHGgnqqYuwPI3d +nqC8srMQ07fw8HyXn7ojPxXyCk+2d6zVgA== -----END CERTIFICATE----- diff --git a/.build/server.key b/.build/server.key index b6dd15913f..f2a7e607b2 100644 --- a/.build/server.key +++ b/.build/server.key @@ -1,27 +1,52 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvC6EG6NgxcAte4GcCfMazyhxl1CsaKOVHMmd/9s2dNOk2JLM -LLHoypLLoiAkzCPPTg/J+eaTU2ZFz1B+6EdqCMsU2Eolj4jc2xIdBTsR/SoeYJmo -6jP+F+ODmqa5skOepuGNi2Ttp2fYR6HvmUaGp9/qWoPMHjjP5/8GLl2bXRJTsTbA -fKYNPSuIHrx1Wi2kgQhHkHx3r+dbwpRXTd/qKwYv64Z0Bd73cO6DE9SUh++UWsud -cEWsqSyLJQHNg3DxUwkCTJl1zE3AHMZowFVBq32PnbEgnV5zmbXe8pPtF9G1Riiw -ZAkzj+0KZnzlbvWzobHA4iuRKAGgyqnR0jFH4QIDAQABAoIBADnMS7U1dAao5Q9X 
-GrcPnP9dm63vEFU/URA7eLTZ/prZWntOczmTFz4I4lSUbNjqcsS2IsIHqN5nvi9T -uPbc4Ft9DJT2CR1R2wvKP3GY2AibBCOFbpUojPWHYqeAZ+6xyCvXgSL8R+YwBgTS -XwYD3F35b0CH1Iy/xFOsR5i8FXj7He8lOBA76fPrH64DEBTB2zUGztu4qpfv57v5 -sfTISi2ZOqPpXc+8Fw0RPeVWQgSRUh7U3lzL8bNBod6lYcjkhF5Yqet4MdHSyWMT -aKdZ2GRHHdWjpyx6J0cD/bjjaTSDqTD8r265mPzY6bq4t6UQMq4KeDnbeiextDf4 -ELT90YUCgYEA6insCSDJddhFZ51guPPyYE9GL8QQfnzLvFOA4qWsi0u9SAbJ9aS0 -vABaEuot0PyYPwMYq7st07z3DSKno4tisPJ2X7v2nEWxv8MjgczWpltPTPaEdmZE -WGIwG3pyh5wJk1b3VpBJB5jkjtJfGmUJaezU10bzm4QhPiEawemCjucCgYEAzbri -/6EZPbJJa9hGtkJEEVLwbQ2U/CE7mZXL+AcPlS3qMSwyz/1OArPxdTRR4S3sYRRO -fsRDBL8LED/kKUDWNni/zkzmFf/hVkmGd9zc6eif4Zr1gmtHlsHQdaMGxsomzxGL -qydBqDN+4TMmHmUmp2jR/0LIF5UMlNoCvHcxgfcCgYEAnOBNE6h1j4++n7Yd0IsO -PFufx+xwqGzvCVJgLHeV6xRo0NJLh1g7BSCvN7DP1Q0E6mImqxaRkyMr2A75hGWj -TqyBhY2ln/hJJxGSvij/PSA7NnKJN9E3xIazeBVGmXd+Ksm+lq2/X2mc5domgMZj -0iUqSrdsCSoyIy+Gf5bzMs0CgYBcquG044vLDpOj0DeJwS+H3iQN+yAwsYd3FtJZ -VlTejV//5ji9Fwwci5EnifmXxGfFErCIyT6m1KbXGvBa5KmYv6sl8d1x62BEzbmU -JBgeBHp/1JzhshD9BzAuzNAwmr4AZ5bR8UzRxuBP8AorhsRyg/STVjFq7ehM5CZ3 -Xfke4QKBgHCPo3R/oi/E2E7OIM/ELlDpvPQTMrV+rYlMFsy3JRvataIqEGnVbhOR -4dQHEM3u2bJxN79wUYYmZuymVB78wKxTn6hGWcGoM6Y8mrJjVv9D8V0Gc0sWw5pF -KZxuCgzjaN2T7i1LsXEV3gaQrKItToEpGPzSI23egFaG6g5SFqBt ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDlGT9vXb93yoM1 +YT0GAxJIB6/2ExUrdprd049oMVZa4Km0nqwN/xjVvQRIWozmbpvps0mCkFM1ZyL1 +iqZFwiJGWcQvvIffFM1qKRMOSTLNPCbM9mfvRKsCU9gjgatdhy8xUZhz7uFGMGAD +nZdlNMYWGgzMVZo0EyW7Z2QJ+ZCl8wW5IT4iswZWrJsNZU/g7HaNBrXiidDihkmQ +8Kt32R0UnqJeXMHwkQLxddmcGdDmVCKsAEUu3NcvPeAlSJsNHfGDRsf9fImRqZCs +gwI8dJtAke/luMTttQ34aADFTmTbVk4ngVhCxgBkJ6FUDFJcp3t3nFssiisNon9k +5FwtJ3hle/QGM9IRdBvGVcOnZZuXXK2lLtakj5UWUik2xWA0hjX+DsFo7TPwKgZy +4zmWCRobW1e1NX52bqYFWZUKYLqbizllOd98o3yed58PhbF1/IuVEuOoiKu7rNdN +gzr8vgRPpWHQNXp3maCcZq2kWybADU2LQNUKAZLSw3nClcX8QVRAfvf8IyDZ/280 +EYRGu99VqLqDPLa1+3CNAb93J1ONvVjKgJwQQWy4dYFLHTYdBzXV5SOpH8YHL/1I 
+Hs9W5k28BdwbeMtJnOaV8rqiA6Xd4Xem111AMAigHExxG3kpSnAq6jiOX0+2V++f +7qAunuC6B/oJATXLCbBQILr0ARtKuQIDAQABAoICAAP97y6VPnPLjgLVJxKbfssa +afz0IxG+9ZH11xrpUl6itjpNBUte8LN97jaF8DLhf9FJtZ2mWHJtODBfzw4wnldf +X/O2Y1MZbvHeXA3LHznXX9ROJ9krg/2DCsu/MIZgh5hvQLEmdK6Iw1q7LH5Pz6YA +Pea/YbPUfWGsVC0rUaBFB/C/oEnk/v0g8VIbFZIvAWrRw6oT0JWESJrGr5b9RYxm +Ljo0Mt0dyorjP/YAUI6u4R+VOp9g+Dvpv7909vfg/j2u5k20e/lgI1xdXqGnvrIx ++/4V/KwPeob9TIqJ/bTOGaFtF5j3dirImP8Yq6rsvSuqodkSSELeAor2XEsDumby +PqJY1MIO9DuZSdqf+Cofgzbd6mpeMAwueb+hfBw8AIMG3M9Xj1uDuU+tjsVA79Er +H9acPxLukGjYP5SY2Mo8hLFLLurpjtcDpYdOP2Wh7PBDwHR8anmPQru2rZXT80NY +j3fXNqnTTFbHuntmZ2qWJovmOuKocU5GEm/QCW/f6miqR9Hzc2vbWaIoEO54vcF6 +eS4iLEkAOfmakz3Sno2AXS1jJI6+2v1899cBINvgpATCMkmXnwFDwr9gNYujwlpF +Yl3QM8Vh9dnVt04oyum5x4sz/mTKj5e9O988iqlOkgID4HBVpy/dwYHsHE+XgDDY +yiFetJ/n0+45QHhyvSwBAoIBAQDnrPz2xCbR03KQwZN2DnZClLVFkZe3tZxR6UsY +63yDTrA0ZMJ8AtE/tX79/Iu7gPidNTCrVmOuelf5q3y3AMo6nlKMCc3tIKr6QtaC +99RtHq5p0T3/TS9tWbGjmxEzyx00R3wz5fSypX76qnQLHs6EmrLxFUNmsHIQS2nH +jWvT1+TdmfmogZ/9RaHyBjHGkDfTmlfEKc7/TleE9XsW+G0cGli3fIO0iY0hJTLd +b65X5Gm0URCqsZgIzD99enIvee13Gw8aUJUt8tJZXQHtOWBu491MLd2AVPQ/7eZa +tl/HtjdMj2E3n5NXTie3laRCX+p9mK6087nE7u3JqPqUXU4BAoIBAQD9Jv3hZeii +0pDgLYgiFVds5n2S4CEB4WOT9wn2vUIrYTSjgjAPfsgeJs6N6+WArwaIJrl4tTK4 +m0VjUG394plvyExU8hNZ7hw0E/33rwsKySnkwUFZtOgbsOgUjajRDfFYsqsDhLK0 +o3dY1M+mdYvU9OBo3EhgFy3fYBhtdGIq/4/3kSM6CARQIjddW2pdbB7pyv3qz0mH +6fpzPXWLIex+WBzRVEz7VPPD4coV3LEhmtdPju4RqFPbHS+OpECun8pyaNt14DRr +t216MiyJGNV74zTLELioVHlhlaPvsWnnIeI+2uhhCgQ8UvHn69x2wiAgLlx/e+RD +qPiINhm/xey5AoIBACCASjSsK+3/xfC8110Whkys5AlQdYJWPgnXuqtSTfN11I5l +HEudcZGIerpS9Z9mZnpXfe5rfix6CWGDR0m9GKHEmDwBHByKGrJlMgbJkcmFJl69 +9f6c62xhyuPy2yTy97Pf23LEbeGqCfhMdV8iAULlGPltTDlZw4a5ratLEbd0cC0O +btHO7YzwedmkONNsZAiRfIKOgvWaHfkPHyeHznbE03FaTHfFXEEsIMij5Ed8Sb/8 +J2Rq6bNCRB3sUZyLdF7jMuk0KNl7WTskKyMGi5rC6MbJIGvifymAzHIpZ6Jy06sv +6imNf3QeCMBeg96z6geYpdnI32TbSAykYhLyTAECggEAOowrCVcdX5LdaMt/AYr4 +BjqkbjShzaKH+i+XQVZyGEBKAUrZvKuwsrB88vvMv187Xn++Q3l8uo9Gk/qFBcPD 
+gsPLS5YU/aaBJVY+VWtJXXw60SoU6B9b0xOuCRreIUNdPwtLW+vzvK1Vq9jEEZZ7 ++YuM3xObNYYG2POLkrzo+1LRxArwH7q87J+NOG0tA2A/IgkNgqHgOqvVfZOIPN5i +qLHOMGeTykjSe8obh8Tbvo7mHwNKchEBG9r7Jb09LGXOV3mC0BdDaGoqyqkR/b8d +mKJqklBStLOcwwHtwUDB4m/GuIy+U7sSUbVJNz8oZNruvSKbx+wqVa+dkzsX529q +GQKCAQBVzafsrfp3yZKa62R7EMtQh6pHDIKvUzZRwxsj4QzQ1y4Rrb6ceXKxI3EQ +ZK6f1Lte+/ifRn8ZsxQOnjNzO9meOco/7CSNGCCcqO/XVN9ixDdF8lzjIsuRqfkT +lsYy7Zo+ZRDUj73UROBvBJtX4jP5It1B/ISKxHxyBFQiB+UtldLl1H+dmGN9LVnF +583i/vTEcLsj9+8yUU8L46sLKfOhNiSBY8D8oKD9Yht0p9SeDxB/r4Rq8Te5Xp1o +FobswNohYBj2rj9+d24uMcpI5nx33JoRkW7VyAXsq8t4b7ei5/sbwuL25NUXhIxf +mMKDxHebdrFY2ADhWLkWus0ik7JA +-----END PRIVATE KEY----- diff --git a/.devcontainer/db/Dockerfile b/.devcontainer/db/Dockerfile index 76eb48a2fa..85efd91832 100644 --- a/.devcontainer/db/Dockerfile +++ b/.devcontainer/db/Dockerfile @@ -1,3 +1,3 @@ -FROM postgres:alpine -RUN apk update && \ - apk add --no-cache openssl +FROM postgres:17 +RUN apt-get update && \ + apt-get install -y --no-install-recommends openssl postgresql-17-postgis-3 diff --git a/.devcontainer/db/init-db.sh b/.devcontainer/db/init-db.sh index 24804402fe..b4ccb371e9 100644 --- a/.devcontainer/db/init-db.sh +++ b/.devcontainer/db/init-db.sh @@ -19,7 +19,10 @@ echo "Configuring md5 authentication in $PGDATA/pg_hba.conf" echo 'local all all trust' > $PGDATA/pg_hba.conf echo "host all all all md5" >> $PGDATA/pg_hba.conf -# Standard test account for Npgsql -psql -U postgres -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" -psql -U postgres -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" -psql -U postgres -c "CREATE EXTENSION ltree" npgsql_tests +# Standard test account for Npgsql and enable extensions +psql -U postgres <> /etc/apt/sources.list.d/pgdg.list' + # Automated repository configuration + sudo apt install -y postgresql-common + sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -v ${{ matrix.pg_major }} -y sudo apt-get update -qq sudo apt-get install -qq postgresql-${{ 
matrix.pg_major }} export PGDATA=/etc/postgresql/${{ matrix.pg_major }}/main - sudo cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} $PGDATA - sudo chmod 600 $PGDATA/{server.crt,server.key} - sudo chown postgres $PGDATA/{server.crt,server.key} + sudo cp $GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} $PGDATA + sudo chmod 600 $PGDATA/{server.crt,server.key,ca.crt} + sudo chown postgres $PGDATA/{server.crt,server.key,ca.crt} + # Create the npgsql_tests database + sudo -u postgres psql -c "CREATE DATABASE npgsql_tests" + # Create npgsql_tests user with md5 password 'npgsql_tests' sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" sudo -u postgres psql -c "CREATE USER npgsql_tests_ssl SUPERUSER PASSWORD 'npgsql_tests_ssl'" sudo -u postgres psql -c "CREATE USER npgsql_tests_nossl SUPERUSER PASSWORD 'npgsql_tests_nossl'" - # To disable PostGIS for prereleases (because it usually isn't available until late), surround with the following: - if [ -z "${{ matrix.pg_prerelease }}" ]; then + # Install PostGIS if required + if [[ $NPGSQL_TEST_POSTGIS == 'true' ]]; then sudo apt-get install -qq postgresql-${{ matrix.pg_major }}-postgis-${{ env.postgis_version }} fi @@ -106,6 +128,7 @@ jobs: sudo sed -i 's/max_connections = 100/max_connections = 500/' $PGDATA/postgresql.conf sudo sed -i 's/#ssl = off/ssl = on/' $PGDATA/postgresql.conf + sudo sed -i "s|ssl_ca_file =|ssl_ca_file = '$PGDATA/ca.crt' #|" $PGDATA/postgresql.conf sudo sed -i "s|ssl_cert_file =|ssl_cert_file = '$PGDATA/server.crt' #|" $PGDATA/postgresql.conf sudo sed -i "s|ssl_key_file =|ssl_key_file = '$PGDATA/server.key' #|" $PGDATA/postgresql.conf sudo sed -i 's/#password_encryption = md5/password_encryption = scram-sha-256/' $PGDATA/postgresql.conf @@ -133,7 +156,7 @@ jobs: sudo -u postgres psql -c "CREATE USER npgsql_tests_scram SUPERUSER PASSWORD 'npgsql_tests_scram'" # Uncomment the following to SSH into the agent running the build 
(https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v3 + #- uses: actions/checkout@v6 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 @@ -156,29 +179,7 @@ jobs: unzip pgsql.zip -x 'pgsql/include/**' 'pgsql/doc/**' 'pgsql/pgAdmin 4/**' 'pgsql/StackBuilder/**' # Match Npgsql CI Docker image and stash one level up - cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} pgsql - - # Find OSGEO version number - OSGEO_VERSION=$(\ - curl -Ls https://download.osgeo.org/postgis/windows/pg${{ matrix.pg_major }} | - sed -n 's/.*>postgis-bundle-pg${{ matrix.pg_major }}-\(${{ env.postgis_version }}.[0-9]*.[0-9]*\)x64.zip<.*/\1/p' | - tail -n 1) - if [ -z "$OSGEO_VERSION" ]; then - OSGEO_VERSION=$(\ - curl -Ls https://download.osgeo.org/postgis/windows/pg${{ matrix.pg_major }}/archive | - sed -n 's/.*>postgis-bundle-pg${{ matrix.pg_major }}-\(${{ env.postgis_version }}.[0-9]*.[0-9]*\)x64.zip<.*/\1/p' | - tail -n 1) - POSTGIS_PATH="archive/" - else - POSTGIS_PATH="" - fi - - # Install PostGIS - echo "Installing PostGIS (version: ${OSGEO_VERSION})" - POSTGIS_FILE="postgis-bundle-pg${{ matrix.pg_major }}-${OSGEO_VERSION}x64" - curl -o postgis.zip -L https://download.osgeo.org/postgis/windows/pg${{ matrix.pg_major }}/${POSTGIS_PATH}${POSTGIS_FILE}.zip - unzip postgis.zip -d postgis - cp -a postgis/$POSTGIS_FILE/. 
pgsql/ + cp $GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} pgsql # Start PostgreSQL pgsql/bin/initdb -D pgsql/PGDATA -E UTF8 -U postgres @@ -192,7 +193,10 @@ jobs: sed -i "s|#synchronous_standby_names =|synchronous_standby_names = 'npgsql_test_sync_standby' #|" pgsql/PGDATA/postgresql.conf sed -i "s|#synchronous_commit =|synchronous_commit = local #|" pgsql/PGDATA/postgresql.conf sed -i "s|#max_prepared_transactions = 0|max_prepared_transactions = 100|" pgsql/PGDATA/postgresql.conf - pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key' start + pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key -c ssl_ca_file=../ca.crt' start + + # Create npgsql_tests database + pgsql/bin/psql -U postgres -c "CREATE DATABASE npgsql_tests" # Create npgsql_tests user with md5 password 'npgsql_tests' pgsql/bin/psql -U postgres -c "CREATE ROLE npgsql_tests SUPERUSER LOGIN PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" @@ -207,7 +211,7 @@ jobs: sed -i "s|#password_encryption = md5|password_encryption = scram-sha-256|" pgsql/PGDATA/postgresql.conf fi - pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key' restart + pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key -c ssl_ca_file=../ca.crt' restart pgsql/bin/psql -U postgres -c "CREATE ROLE npgsql_tests_scram SUPERUSER LOGIN PASSWORD 'npgsql_tests_scram'" @@ -229,15 +233,20 @@ jobs: - name: Start PostgreSQL ${{ matrix.pg_major }} (MacOS) if: startsWith(matrix.os, 'macos') run: | - PGDATA=/usr/local/var/postgresql@${{ matrix.pg_major }} + brew update + brew install postgresql@${{ matrix.pg_major }} + + PGDATA=/opt/homebrew/var/postgresql@${{ matrix.pg_major }} sudo sed -i '' 's/#ssl = off/ssl = on/' $PGDATA/postgresql.conf - cp 
$GITHUB_WORKSPACE/.build/{server.crt,server.key} $PGDATA - chmod 600 $PGDATA/{server.crt,server.key} + sudo sed -i '' "s/#ssl_ca_file =/ssl_ca_file = 'ca.crt' #/" $PGDATA/postgresql.conf + cp $GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} $PGDATA + chmod 600 $PGDATA/{server.crt,server.key,ca.crt} - postgreService=$(brew services list | grep -oe "postgresql\S*") + postgreService=$(brew services list | grep -oe "postgresql@${{ matrix.pg_major }}\S*") brew services start $postgreService + export PATH="/opt/homebrew/opt/postgresql@${{ matrix.pg_major }}/bin:$PATH" echo "Check PostgreSQL service is running" i=5 COMMAND='pg_isready' @@ -253,6 +262,9 @@ jobs: sleep 5 done + # Create the npgsql_tests database + psql -c "CREATE DATABASE npgsql_tests" postgres + # Create npgsql_tests user with md5 password 'npgsql_tests' psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" postgres @@ -295,21 +307,18 @@ jobs: done psql -c "CREATE USER npgsql_tests_scram SUPERUSER PASSWORD 'npgsql_tests_scram'" postgres - # TODO: Once test/Npgsql.Specification.Tests work, switch to just testing on the solution - name: Test - run: | - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.Tests --logger "GitHubActions;report-warnings=false" - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.DependencyInjection.Tests --logger "GitHubActions;report-warnings=false" + run: dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} --logger "GitHubActions;report-warnings=false" --blame-crash --blame-hang-timeout 30s shell: bash - - name: Test Plugins - if: "!startsWith(matrix.os, 'macos')" - run: | - if [ -z "${{ matrix.pg_prerelease }}" ]; then - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.PluginTests --logger "GitHubActions;report-warnings=false" - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.NodaTime.Tests --logger 
"GitHubActions;report-warnings=false" - fi - shell: bash + - name: Upload Test Hang Dumps + uses: actions/upload-artifact@v7 + if: failure() + with: + name: test-hang-dumps + path: | + **/*.dmp + **/*_Sequence.xml - id: analyze_tag name: Analyze tag @@ -326,16 +335,16 @@ jobs: publish-ci: needs: build - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event_name == 'push' && github.repository == 'npgsql/npgsql' environment: myget steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: NuGet Cache - uses: actions/cache@v3 + uses: actions/cache@v5 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} @@ -343,15 +352,13 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} + uses: actions/setup-dotnet@v5.2.0 - name: Pack - run: dotnet pack Npgsql.sln --configuration Release --output nupkgs --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true + run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v7 with: name: Npgsql.CI path: nupkgs @@ -368,24 +375,22 @@ jobs: release: needs: build - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event_name == 'push' && startsWith(github.repository, 'npgsql/') && needs.build.outputs.is_release == 'true' environment: nuget.org steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} + uses: actions/setup-dotnet@v5.2.0 - name: Pack - run: dotnet pack --configuration Release --output nupkgs 
-p:ContinuousIntegrationBuild=true + run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v7 with: name: Npgsql.Release path: nupkgs diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 945fa9bd46..0000000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,76 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" - -on: - push: - branches: - - main - - 'hotfix/**' - - 'release/**' - pull_request: - # The branches below must be a subset of the branches above - branches: - - main - - 'hotfix/**' - - 'release/**' - schedule: - - cron: '21 0 * * 4' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'csharp' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Learn more about CodeQL language support at https://git.io/codeql-language-support - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. 
- # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml new file mode 100644 index 0000000000..3fc070a421 --- /dev/null +++ b/.github/workflows/native-aot.yml @@ -0,0 +1,204 @@ +name: NativeAOT + +on: + push: + branches: + - main + - 'hotfix/**' + tags: + - '*' + pull_request: + +permissions: + contents: read + +# Cancel previous PR branch commits (head_ref is only defined on PRs) +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true + AOT_Compat: | + param([string]$targetFramework) + + $publishOutput = dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f $targetFramework -p:RootNpgsql=true + + $actualWarningCount = 0 + + foreach ($line in $($publishOutput -split "`r`n")) + { + if ($line -like "*analysis warning IL*") + { + Write-Host $line + + $actualWarningCount += 1 + } + } + + $testPassed = 0 + + $binaryPath = "test/Npgsql.NativeAotTests/bin/Release/$targetFramework/linux-x64/native/" + if (-not (Test-Path -LiteralPath $binaryPath)) + { + $testPassed = 1 + Write-Host "Could not publish app, output was:" + foreach ($line 
in $($publishOutput -split "`r`n")) + { + Write-Host $line + } + } + + Write-Host "Actual warning count is:", $actualWarningCount + $expectedWarningCount = 0 + + if ($actualWarningCount -ne $expectedWarningCount) + { + $testPassed = 2 + Write-Host "Actual warning count:", $actualWarningCount, "is not as expected. Expected warning count is:", $expectedWarningCount + } + + Exit $testPassed + # Uncomment and edit the following to use nightly/preview builds + # nuget_config: | + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # +jobs: + full: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ ubuntu-24.04 ] + pg_major: [ 18 ] + tfm: [ net10.0 ] + + steps: + - name: Checkout + uses: actions/checkout@v6 + + # - name: Setup nuget config + # run: echo "$nuget_config" > NuGet.config + + - name: NuGet Cache + uses: actions/cache@v5 + with: + path: ~/.nuget/packages + key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} + restore-keys: | + ${{ runner.os }}-nuget- + + - name: Setup .NET Core SDK + uses: actions/setup-dotnet@v5.2.0 + + - name: Write script + run: echo "$AOT_Compat" > test-aot-compatibility.ps1 + + - name: Publish and check for trimmer warnings + run: ./test-aot-compatibility.ps1 ${{ matrix.tfm }} + shell: pwsh + trimmed: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-24.04] + pg_major: [ 18 ] + tfm: [ net10.0 ] + + steps: + - name: Checkout + uses: actions/checkout@v6 + + # - name: Setup nuget config + # run: echo "$nuget_config" > NuGet.config + + - name: NuGet Cache + uses: actions/cache@v5 + with: + path: ~/.nuget/packages + key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} + restore-keys: | + ${{ runner.os }}-nuget- + + - name: Setup .NET Core SDK + uses: actions/setup-dotnet@v5.2.0 + + - name: Start PostgreSQL + run: | + sudo systemctl start postgresql.service + sudo -u postgres psql -c "CREATE USER 
npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" + sudo -u postgres psql -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" + + - name: Build + run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f ${{ matrix.tfm }} -p:OptimizationPreference=Size + shell: bash + + # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) + #- uses: actions/checkout@v6 + #- name: Setup tmate session + # uses: mxschmitt/action-tmate@v3 + + - name: Run + run: test/Npgsql.NativeAotTests/bin/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests + + - name: Write binary size to summary + run: | + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY + + - name: Dump mstat + run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + + - name: Upload mstat + uses: actions/upload-artifact@v7 + with: + name: npgsql.mstat + path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" + retention-days: 3 + + - name: Upload codedgen dgml + uses: actions/upload-artifact@v7 + with: + name: npgsql.codegen.dgml.xml + path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" + retention-days: 3 + + - name: Upload scan dgml + uses: actions/upload-artifact@v7 + with: + name: npgsql.scan.dgml.xml + path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" + retention-days: 3 + + - name: Assert binary size + run: | + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/${{ matrix.tfm 
}}/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" + + if (( size > 5242880 )); then + echo "Binary size exceeds 5MB threshold" + exit 1 + fi diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml deleted file mode 100644 index 118c2e3e28..0000000000 --- a/.github/workflows/rich-code-nav.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Rich Code Navigation - -on: - push: - branches: - - main - - stable - tags: - - '*' - -env: - dotnet_sdk_version: '7.0.100' - DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true - -jobs: - build: - runs-on: windows-latest - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: NuGet Cache - uses: actions/cache@v3 - with: - path: ~/.nuget/packages - key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} - restore-keys: | - ${{ runner.os }}-nuget- - - - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} - - - name: Build - run: dotnet build Npgsql.sln --configuration Debug - shell: bash - - - name: Rich Navigation Indexing - uses: microsoft/RichCodeNavIndexer@v0.1 - with: - languages: csharp - repo-token: ${{ github.token }} diff --git a/.github/workflows/trigger-doc-build.yml b/.github/workflows/trigger-doc-build.yml index dfbe89601e..e8783c9e16 100644 --- a/.github/workflows/trigger-doc-build.yml +++ b/.github/workflows/trigger-doc-build.yml @@ -8,9 +8,12 @@ on: branches: - docs +permissions: + contents: read + jobs: build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest steps: - name: Trigger documentation build run: | diff --git a/.gitignore b/.gitignore index 669922429f..da1d49e515 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ artifacts/ *.ide/ .vs/ TestResult.xml +*.lscache diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 6bf5ff40c5..a505eb8cfc 100644 --- a/.vscode/extensions.json +++ 
b/.vscode/extensions.json @@ -2,10 +2,6 @@ // List of extensions which should be recommended for users of this workspace. "recommendations": [ "ms-dotnettools.csharp", - "formulahendry.dotnet-test-explorer", - ], - // List of extensions recommended by VS Code that should not be recommended for users of this workspace. - "unwantedRecommendations": [ - + "ms-dotnettools.csdevkit" ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 3f641af41f..22993a3100 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,3 @@ { - "omnisharp.defaultLaunchSolution": "Npgsql.sln", - "dotnet-test-explorer.testProjectPath": "**/*.Tests.csproj" + "dotnet.defaultSolution": "Npgsql.slnx" } \ No newline at end of file diff --git a/Directory.Build.props b/Directory.Build.props index 6d7a7869ab..cdd05be625 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@ - + - 7.0.0 + 11.0.0-preview.1 latest true enable @@ -10,7 +10,7 @@ true true - Copyright 2022 © The Npgsql Development Team + Copyright 2026 © The Npgsql Development Team Npgsql PostgreSQL https://github.com/npgsql/npgsql @@ -19,16 +19,9 @@ true snupkg true - $(NoWarn);NETSDK1138 true - - - disable - $(NoWarn);CS8632;CS8600 - - diff --git a/Directory.Packages.props b/Directory.Packages.props index 1eed6bee0c..bbe474665f 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,45 +1,53 @@ - - - - - - - - - - - - - - + + + 10.0.7 + 10.0.7 + + + 10.0.7 + 10.0.7 + - - - + + + + - - + + - - - + + + + + + + + + + - - - - - - - - - - - + + + + + + + + + + + + + - - - + + + + + + diff --git a/LICENSE b/LICENSE index b102b0e388..5f0d26b868 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2002-2021, Npgsql +Copyright (c) 2002-2026, Npgsql Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement diff --git a/Npgsql.sln b/Npgsql.sln 
deleted file mode 100644 index 9e3ebb7af6..0000000000 --- a/Npgsql.sln +++ /dev/null @@ -1,202 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.28822.285 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{8537E50E-CF7F-49CB-B4EF-3E2A1B11F050}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{ED612DB1-AB32-4603-95E7-891BACA71C39}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql", "src\Npgsql\Npgsql.csproj", "{9D13B739-62B1-4190-B386-7A9547304EB3}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Tests", "test\Npgsql.Tests\Npgsql.Tests.csproj", "{E9C258D7-0D8E-4E6A-9857-5C6438591755}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Benchmarks", "test\Npgsql.Benchmarks\Npgsql.Benchmarks.csproj", "{8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Json.NET", "src\Npgsql.Json.NET\Npgsql.Json.NET.csproj", "{9CBE603F-6746-411D-A5FD-CB2C948CD7D0}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.NodaTime", "src\Npgsql.NodaTime\Npgsql.NodaTime.csproj", "{D8DF12D6-FA70-4653-BD8F-C188944836DE}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.PluginTests", "test\Npgsql.PluginTests\Npgsql.PluginTests.csproj", "{9BD7FC3D-6956-42A8-A586-2558C499EBA2}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.NetTopologySuite", "src\Npgsql.NetTopologySuite\Npgsql.NetTopologySuite.csproj", "{6CB12050-DC9B-4155-BADD-BFDD54CDD70F}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.GeoJSON", "src\Npgsql.GeoJSON\Npgsql.GeoJSON.csproj", "{F7C53EBD-0075-474F-A083-419257D04080}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Specification.Tests", 
"test\Npgsql.Specification.Tests\Npgsql.Specification.Tests.csproj", "{A77E5FAF-D775-4AB4-8846-8965C2104E60}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{004A2E0F-D34A-44D4-8DF0-D2BC63B57073}" - ProjectSection(SolutionItems) = preProject - .editorconfig = .editorconfig - Directory.Build.props = Directory.Build.props - Directory.Packages.props = Directory.Packages.props - README.md = README.md - global.json = global.json - EndProjectSection -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.SourceGenerators", "src\Npgsql.SourceGenerators\Npgsql.SourceGenerators.csproj", "{63026A19-60B8-4906-81CB-216F30E8094B}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.NodaTime.Tests", "test\Npgsql.NodaTime.Tests\Npgsql.NodaTime.Tests.csproj", "{C00D2EB1-5719-4372-9E1C-5ED05DC23A00}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.OpenTelemetry", "src\Npgsql.OpenTelemetry\Npgsql.OpenTelemetry.csproj", "{DA29F063-1828-47D8-B051-800AF7C9A0BE}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Github", "Github", "{BA7B6F53-D24D-45AC-927A-266857EA8D1E}" - ProjectSection(SolutionItems) = preProject - .github\workflows\build.yml = .github\workflows\build.yml - .github\dependabot.yml = .github\dependabot.yml - .github\workflows\codeql-analysis.yml = .github\workflows\codeql-analysis.yml - .github\workflows\rich-code-nav.yml = .github\workflows\rich-code-nav.yml - EndProjectSection -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.DependencyInjection", "src\Npgsql.DependencyInjection\Npgsql.DependencyInjection.csproj", "{B58E12EB-E43D-4D77-894E-5157D2269836}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.DependencyInjection.Tests", "test\Npgsql.DependencyInjection.Tests\Npgsql.DependencyInjection.Tests.csproj", "{EB2530FC-69F7-4DCB-A8B3-3671A157ED32}" -EndProject -Global - 
GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Debug|x86 = Debug|x86 - Release|Any CPU = Release|Any CPU - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|Any CPU.Build.0 = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|x86.ActiveCfg = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|x86.Build.0 = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|Any CPU.Build.0 = Release|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|x86.ActiveCfg = Release|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|x86.Build.0 = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|x86.ActiveCfg = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|x86.Build.0 = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|Any CPU.Build.0 = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|x86.ActiveCfg = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|x86.Build.0 = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|x86.ActiveCfg = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|x86.Build.0 = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|Any CPU.ActiveCfg = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|Any 
CPU.Build.0 = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|x86.ActiveCfg = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|x86.Build.0 = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|Any CPU.Build.0 = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|x86.ActiveCfg = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|x86.Build.0 = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|Any CPU.Build.0 = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|x86.ActiveCfg = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|x86.Build.0 = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|x86.ActiveCfg = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|x86.Build.0 = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|Any CPU.Build.0 = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|x86.ActiveCfg = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|x86.Build.0 = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|Any CPU.Build.0 = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|x86.ActiveCfg = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|x86.Build.0 = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|Any CPU.Build.0 = Release|Any CPU - 
{9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|x86.ActiveCfg = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|x86.Build.0 = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|x86.ActiveCfg = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|x86.Build.0 = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|Any CPU.Build.0 = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|x86.ActiveCfg = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|x86.Build.0 = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|Any CPU.Build.0 = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|x86.ActiveCfg = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|x86.Build.0 = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|Any CPU.ActiveCfg = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|Any CPU.Build.0 = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|x86.ActiveCfg = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|x86.Build.0 = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|Any CPU.Build.0 = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|x86.ActiveCfg = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|x86.Build.0 = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|Any CPU.ActiveCfg = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|Any CPU.Build.0 = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|x86.ActiveCfg = Release|Any CPU 
- {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|x86.Build.0 = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|x86.ActiveCfg = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|x86.Build.0 = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|Any CPU.Build.0 = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|x86.ActiveCfg = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|x86.Build.0 = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|Any CPU.Build.0 = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|x86.ActiveCfg = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|x86.Build.0 = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|Any CPU.Build.0 = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|x86.ActiveCfg = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|x86.Build.0 = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|Any CPU.Build.0 = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|x86.ActiveCfg = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|x86.Build.0 = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|Any CPU.Build.0 = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|x86.ActiveCfg = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|x86.Build.0 = Release|Any CPU 
- {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|Any CPU.Build.0 = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|x86.ActiveCfg = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|x86.Build.0 = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|Any CPU.ActiveCfg = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|Any CPU.Build.0 = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|x86.ActiveCfg = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|x86.Build.0 = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|Any CPU.Build.0 = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|x86.ActiveCfg = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|x86.Build.0 = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|Any CPU.ActiveCfg = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|Any CPU.Build.0 = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|x86.ActiveCfg = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|x86.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(NestedProjects) = preSolution - {9D13B739-62B1-4190-B386-7A9547304EB3} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {E9C258D7-0D8E-4E6A-9857-5C6438591755} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {D8DF12D6-FA70-4653-BD8F-C188944836DE} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {9BD7FC3D-6956-42A8-A586-2558C499EBA2} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F} = 
{8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {F7C53EBD-0075-474F-A083-419257D04080} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {A77E5FAF-D775-4AB4-8846-8965C2104E60} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {63026A19-60B8-4906-81CB-216F30E8094B} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {DA29F063-1828-47D8-B051-800AF7C9A0BE} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {BA7B6F53-D24D-45AC-927A-266857EA8D1E} = {004A2E0F-D34A-44D4-8DF0-D2BC63B57073} - {B58E12EB-E43D-4D77-894E-5157D2269836} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {C90AEECD-DB4C-4BE6-B506-16A449852FB8} - EndGlobalSection - GlobalSection(MonoDevelopProperties) = preSolution - StartupItem = Npgsql.csproj - EndGlobalSection -EndGlobal diff --git a/Npgsql.slnx b/Npgsql.slnx new file mode 100644 index 0000000000..e69a6728c8 --- /dev/null +++ b/Npgsql.slnx @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Npgsql.sln.DotSettings b/Npgsql.slnx.DotSettings similarity index 97% rename from Npgsql.sln.DotSettings rename to Npgsql.slnx.DotSettings index 69c98554d3..7aa838ea54 100644 --- a/Npgsql.sln.DotSettings +++ b/Npgsql.slnx.DotSettings @@ -1,4 +1,4 @@ - + DO_NOT_SHOW DO_NOT_SHOW DO_NOT_SHOW @@ -91,6 +91,7 @@ True True True + True True True True @@ -121,6 +122,8 @@ True True True + True + True True True True diff --git a/README.md b/README.md index d8e53746f1..2b7bfef019 100644 --- a/README.md +++ b/README.md @@ -17,10 +17,14 @@ For the full documentation, please visit [the Npgsql website](https://www.npgsql Here's a basic code snippet to get you started: ```csharp +using Npgsql; + var connString = "Host=myserver;Username=mylogin;Password=mypass;Database=mydatabase"; -await using var conn = new 
NpgsqlConnection(connString); -await conn.OpenAsync(); +var dataSourceBuilder = new NpgsqlDataSourceBuilder(connString); +var dataSource = dataSourceBuilder.Build(); + +var conn = await dataSource.OpenConnectionAsync(); // Insert some data await using (var cmd = new NpgsqlCommand("INSERT INTO data (some_field) VALUES (@p)", conn)) diff --git a/global.json b/global.json index 15a6b04b3b..8e7ec085a0 100644 --- a/global.json +++ b/global.json @@ -1,7 +1,7 @@ { "sdk": { - "version": "7.0.100", + "version": "10.0.200", "rollForward": "latestMajor", - "allowPrerelease": "true" + "allowPrerelease": false } } diff --git a/src/.editorconfig b/src/.editorconfig index 3137d00229..6574a9291a 100644 --- a/src/.editorconfig +++ b/src/.editorconfig @@ -12,3 +12,5 @@ dotnet_diagnostic.RS0026.severity = none # Public API with optional parameter(s) should have the most parameters amongst its public overloads. dotnet_diagnostic.RS0027.severity = none + +dotnet_diagnostic.CA2007.severity = warning; diff --git a/src/Directory.Build.props b/src/Directory.Build.props index f7d4b965b7..aca6ef7b02 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -1,14 +1,11 @@ - + + true true - - - - diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index bd23ae4e27..3c10503037 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -2,9 +2,7 @@ Shay Rojansky - - netstandard2.0;net7.0 - net7.0 + net10.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md @@ -12,9 +10,10 @@ - + + diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs new file mode 100644 index 0000000000..6e2b4e7d4f --- /dev/null +++ 
b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs @@ -0,0 +1,220 @@ +using System; +using System.ComponentModel; +using Npgsql; + +namespace Microsoft.Extensions.DependencyInjection; + +public static partial class NpgsqlServiceCollectionExtensions +{ + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); +} diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index a9333a0753..7e22029a40 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data.Common; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Logging; @@ -10,8 +10,33 @@ namespace Microsoft.Extensions.DependencyInjection; /// /// Extension method for setting up Npgsql services in an . /// -public static class NpgsqlServiceCollectionExtensions +public static partial class NpgsqlServiceCollectionExtensions { + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. 
+ public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + /// /// Registers an and an in the . /// @@ -22,42 +47,162 @@ public static class NpgsqlServiceCollectionExtensions /// /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlDataSourceCore(serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder) + , connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the . /// /// The to add services to. /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. /// Defaults to . 
/// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, + Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. 
+ /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. 
+ /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); /// /// Registers an and an in the @@ -69,120 +214,338 @@ public static IServiceCollection AddNpgsqlDataSource( /// /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlMultiHostDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the - /// . 
/// /// The to add services to. /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, + Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlMultiHostDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? 
serviceKey = null) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. + /// The same service collection so that multiple calls can be chained. 
+ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); static IServiceCollection AddNpgsqlDataSourceCore( this IServiceCollection serviceCollection, + object? serviceKey, string connectionString, - Action? dataSourceBuilderAction, + Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime dataSourceLifetime, + object? state) { serviceCollection.TryAdd( new ServiceDescriptor( typeof(NpgsqlDataSource), - sp => + serviceKey, + (sp, key) => { var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); - dataSourceBuilderAction?.Invoke(dataSourceBuilder); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); return dataSourceBuilder.Build(); }, dataSourceLifetime)); - AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); return serviceCollection; } - static IServiceCollection AddNpgsqlMultiHostDataSourceCore( + static IServiceCollection AddNpgsqlSlimDataSourceCore( this IServiceCollection serviceCollection, + object? serviceKey, string connectionString, - Action? dataSourceBuilderAction, + Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime dataSourceLifetime, + object? 
state) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + serviceKey, + (sp, key) => + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); + dataSourceBuilder.UseLoggerFactory(sp.GetService()); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); + return dataSourceBuilder.Build(); + }, + dataSourceLifetime)); + + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); + + return serviceCollection; + } + + static IServiceCollection AddMultiHostNpgsqlDataSourceCore( + this IServiceCollection serviceCollection, + object? serviceKey, + string connectionString, + Action? dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime, + object? state) { serviceCollection.TryAdd( new ServiceDescriptor( typeof(NpgsqlMultiHostDataSource), - sp => + serviceKey, + (sp, key) => { var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); - dataSourceBuilderAction?.Invoke(dataSourceBuilder); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); return dataSourceBuilder.BuildMultiHost(); }, dataSourceLifetime)); + if (serviceKey is not null) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + dataSourceLifetime)); + } + else + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + } + + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); + + return serviceCollection; + } + + static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( + this IServiceCollection serviceCollection, + object? serviceKey, + string connectionString, + Action? 
dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime, + object? state) + { serviceCollection.TryAdd( new ServiceDescriptor( - typeof(NpgsqlDataSource), - sp => sp.GetRequiredService(), + typeof(NpgsqlMultiHostDataSource), + serviceKey, + (sp, _) => + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); + dataSourceBuilder.UseLoggerFactory(sp.GetService()); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); + return dataSourceBuilder.BuildMultiHost(); + }, dataSourceLifetime)); - AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + if (serviceKey is not null) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + dataSourceLifetime)); + } + else + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + } + + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); return serviceCollection; } static void AddCommonServices( IServiceCollection serviceCollection, + object? serviceKey, ServiceLifetime connectionLifetime, ServiceLifetime dataSourceLifetime) { - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(NpgsqlConnection), - sp => sp.GetRequiredService().CreateConnection(), - connectionLifetime)); + // We don't try to invoke KeyedService methods if there is no service key. + // This allows user code that use non-standard containers without support for IKeyedServiceProvider to keep on working. 
+ if (serviceKey is not null) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlConnection), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key).CreateConnection(), + connectionLifetime)); - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(DbDataSource), - sp => sp.GetRequiredService(), - dataSourceLifetime)); + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbDataSource), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + dataSourceLifetime)); - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(DbConnection), - sp => sp.GetRequiredService(), - connectionLifetime)); + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbConnection), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + connectionLifetime)); + } + else + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlConnection), + sp => sp.GetRequiredService().CreateConnection(), + connectionLifetime)); + + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbConnection), + sp => sp.GetRequiredService(), + connectionLifetime)); + } } -} \ No newline at end of file +} diff --git a/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs b/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs index 1a340b1a15..f30dbdd96f 100644 --- a/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs +++ b/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt new file mode 100644 index 0000000000..86c0b922d4 --- /dev/null +++ b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt @@ -0,0 
+1,22 @@ +#nullable enable +Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! 
serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! 
connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? 
serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
+static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt new file mode 100644 index 0000000000..7dc5c58110 --- /dev/null +++ b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt @@ -0,0 +1 @@ +#nullable enable diff --git a/src/Npgsql.DependencyInjection/README.md b/src/Npgsql.DependencyInjection/README.md index 16b87419f0..7b22c8d15d 100644 --- a/src/Npgsql.DependencyInjection/README.md +++ b/src/Npgsql.DependencyInjection/README.md @@ -42,6 +42,39 @@ app.MapGet("/", async (NpgsqlDataSource dataSource) => }); ``` -Finally, the `AddNpgsqlDataSource` method also accepts a lambda parameter allowing you to configure aspects of Npgsql beyond the connection string. +The `AddNpgsqlDataSource` method also accepts a lambda parameter allowing you to configure aspects of Npgsql beyond the connection string, e.g. to configure `UseLoggerFactory` and `UseNetTopologySuite`: + +```csharp +var builder = WebApplication.CreateBuilder(args); + +builder.Services.AddNpgsqlDataSource( + "Host=pg_server;Username=test;Password=test;Database=test", + builder => builder + .UseLoggerFactory(loggerFactory) + .UseNetTopologySuite()); +``` + +Finally, starting with Npgsql and .NET 8.0, you can now register multiple data sources (and connections), using a service key to distinguish between them: + +```c# +var builder = WebApplication.CreateBuilder(args); + +builder.Services + .AddNpgsqlDataSource("Host=localhost;Database=CustomersDB;Username=test;Password=test", serviceKey: DatabaseType.CustomerDb) + .AddNpgsqlDataSource("Host=localhost;Database=OrdersDB;Username=test;Password=test", serviceKey: DatabaseType.OrdersDb); + +var app = builder.Build(); + +app.MapGet("/", async ([FromKeyedServices(DatabaseType.OrdersDb)] NpgsqlConnection connection) + => connection.ConnectionString); + +app.Run(); + +enum DatabaseType +{ + CustomerDb, + OrdersDb +} +``` For more information, [see the Npgsql 
documentation](https://www.npgsql.org/doc/index.html). diff --git a/src/Npgsql.GeoJSON/Internal/CrsMap.WellKnown.cs b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs similarity index 98% rename from src/Npgsql.GeoJSON/Internal/CrsMap.WellKnown.cs rename to src/Npgsql.GeoJSON/CrsMap.WellKnown.cs index 9d733830ea..a2d8cd0217 100644 --- a/src/Npgsql.GeoJSON/Internal/CrsMap.WellKnown.cs +++ b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs @@ -1,14 +1,14 @@ -namespace Npgsql.GeoJSON.Internal; +namespace Npgsql.GeoJSON; -readonly partial struct CrsMap +public partial class CrsMap { /// /// These entries came from spatial_res_sys. They are used to elide memory allocations /// if they are identical to the entries for the current connection. Otherwise, - /// memory allocated for overrided entries only (added, removed, or modified). + /// memory allocated for overridden entries only (added, removed, or modified). /// internal static readonly CrsMapEntry[] WellKnown = - { + [ new(2000, 2180, "EPSG"), new(2188, 2217, "EPSG"), new(2219, 2220, "EPSG"), @@ -584,6 +584,6 @@ readonly partial struct CrsMap new(32601, 32667, "EPSG"), new(32701, 32761, "EPSG"), new(32766, 32766, "EPSG"), - new(900913, 900913, "spatialreferencing.org"), - }; -} \ No newline at end of file + new(900913, 900913, "spatialreferencing.org") + ]; +} diff --git a/src/Npgsql.GeoJSON/CrsMap.cs b/src/Npgsql.GeoJSON/CrsMap.cs new file mode 100644 index 0000000000..602387a911 --- /dev/null +++ b/src/Npgsql.GeoJSON/CrsMap.cs @@ -0,0 +1,59 @@ + +namespace Npgsql.GeoJSON; + +/// +/// A map of entries that map the authority to the inclusive range of SRID. +/// +public partial class CrsMap +{ + readonly CrsMapEntry[]? _overridden; + + internal CrsMap(CrsMapEntry[]? overridden) + => _overridden = overridden; + + internal string? GetAuthority(int srid) + => GetAuthority(_overridden, srid) ?? GetAuthority(WellKnown, srid); + + static string? GetAuthority(CrsMapEntry[]? 
entries, int srid) + { + if (entries == null) + return null; + + var left = 0; + var right = entries.Length; + while (left <= right) + { + var middle = left + (right - left) / 2; + var entry = entries[middle]; + + if (srid < entry.MinSrid) + right = middle - 1; + else + if (srid > entry.MaxSrid) + left = middle + 1; + else + return entry.Authority; + } + + return null; + } +} + +/// +/// An entry which maps the authority to the inclusive range of SRID. +/// +readonly struct CrsMapEntry +{ + internal readonly int MinSrid; + internal readonly int MaxSrid; + internal readonly string? Authority; + + internal CrsMapEntry(int minSrid, int maxSrid, string? authority) + { + MinSrid = minSrid; + MaxSrid = maxSrid; + Authority = authority != null + ? string.IsInterned(authority) ?? authority + : null; + } +} diff --git a/src/Npgsql.GeoJSON/CrsMapExtensions.cs b/src/Npgsql.GeoJSON/CrsMapExtensions.cs new file mode 100644 index 0000000000..dde5e0f688 --- /dev/null +++ b/src/Npgsql.GeoJSON/CrsMapExtensions.cs @@ -0,0 +1,51 @@ +using System; +using System.Threading.Tasks; +using Npgsql.GeoJSON.Internal; + +namespace Npgsql.GeoJSON; + +/// +/// Extensions for getting a CrsMap from a database. +/// +public static class CrsMapExtensions +{ + /// + /// Gets the full crs details from the database. + /// + /// + public static async Task GetCrsMapAsync(this NpgsqlDataSource dataSource) + { + var builder = new CrsMapBuilder(); + using var cmd = GetCsrCommand(dataSource); + using var reader = await cmd.ExecuteReaderAsync().ConfigureAwait(false); + + while (await reader.ReadAsync().ConfigureAwait(false)) + builder.Add(new CrsMapEntry(reader.GetInt32(0), reader.GetInt32(1), reader.GetString(2))); + + return builder.Build(); + } + + /// + /// Gets the full crs details from the database. 
+ /// + /// + public static CrsMap GetCrsMap(this NpgsqlDataSource dataSource) + { + var builder = new CrsMapBuilder(); + using var cmd = GetCsrCommand(dataSource); + using var reader = cmd.ExecuteReader(); + + while (reader.Read()) + builder.Add(new CrsMapEntry(reader.GetInt32(0), reader.GetInt32(1), reader.GetString(2))); + + return builder.Build(); + } + + static NpgsqlCommand GetCsrCommand(NpgsqlDataSource dataSource) + => dataSource.CreateCommand(""" + SELECT min(srid), max(srid), auth_name + FROM(SELECT srid, auth_name, srid - rank() OVER(PARTITION BY auth_name ORDER BY srid) AS range FROM spatial_ref_sys) AS s + GROUP BY range, auth_name + ORDER BY 1; + """); +} diff --git a/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs b/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs index 7702a7e0b3..eb37a4ba60 100644 --- a/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs +++ b/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs @@ -1,4 +1,4 @@ -using GeoJSON.Net.Geometry; +using GeoJSON.Net.Geometry; namespace Npgsql.GeoJSON.Internal; @@ -48,6 +48,6 @@ internal void Accumulate(Position position) internal double[] Build() => _hasAltitude - ? new[] { _minLongitude, _minLatitude, _minAltitude, _maxLongitude, _maxLatitude, _maxAltitude } - : new[] { _minLongitude, _minLatitude, _maxLongitude, _maxLatitude }; + ? [_minLongitude, _minLatitude, _minAltitude, _maxLongitude, _maxLatitude, _maxAltitude] + : [_minLongitude, _minLatitude, _maxLongitude, _maxLatitude]; } \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/CrsMap.cs b/src/Npgsql.GeoJSON/Internal/CrsMap.cs deleted file mode 100644 index aa7dc58e2d..0000000000 --- a/src/Npgsql.GeoJSON/Internal/CrsMap.cs +++ /dev/null @@ -1,108 +0,0 @@ -using System; - -namespace Npgsql.GeoJSON.Internal; - -/// -/// An entry which maps the authority to the inclusive range of SRID. -/// -readonly struct CrsMapEntry -{ - internal readonly int MinSrid; - internal readonly int MaxSrid; - internal readonly string? 
Authority; - - internal CrsMapEntry(int minSrid, int maxSrid, string? authority) - { - MinSrid = minSrid; - MaxSrid = maxSrid; - Authority = authority != null - ? string.IsInterned(authority) ?? authority - : null; - } -} - -struct CrsMapBuilder -{ - CrsMapEntry[] _overrides; - int _overridenIndex; - int _wellKnownIndex; - - internal void Add(in CrsMapEntry entry) - { - var wellKnown = CrsMap.WellKnown[_wellKnownIndex]; - if (wellKnown.MinSrid == entry.MinSrid && - wellKnown.MaxSrid == entry.MaxSrid && - string.Equals(wellKnown.Authority, entry.Authority, StringComparison.Ordinal)) - { - _wellKnownIndex++; - return; - } - - if (wellKnown.MinSrid < entry.MinSrid) - { - do - _wellKnownIndex++; - while (CrsMap.WellKnown.Length < _wellKnownIndex && - CrsMap.WellKnown[_wellKnownIndex].MaxSrid < entry.MaxSrid); - AddCore(new CrsMapEntry(wellKnown.MinSrid, Math.Min(wellKnown.MaxSrid, entry.MinSrid - 1), null)); - } - - AddCore(entry); - } - - void AddCore(in CrsMapEntry entry) - { - var index = _overridenIndex + 1; - if (_overrides == null) - _overrides = new CrsMapEntry[4]; - else - if (_overrides.Length == index) - Array.Resize(ref _overrides, _overrides.Length << 1); - - _overrides[_overridenIndex] = entry; - _overridenIndex = index; - } - - internal CrsMap Build() - { - if (_overrides != null && _overrides.Length < _overridenIndex) - Array.Resize(ref _overrides, _overridenIndex); - - return new CrsMap(_overrides); - } -} - -readonly partial struct CrsMap -{ - readonly CrsMapEntry[]? _overriden; - - internal CrsMap(CrsMapEntry[]? overriden) - => _overriden = overriden; - - internal string? GetAuthority(int srid) - => GetAuthority(_overriden, srid) ?? GetAuthority(WellKnown, srid); - - static string? GetAuthority(CrsMapEntry[]? 
entries, int srid) - { - if (entries == null) - return null; - - var left = 0; - var right = entries.Length; - while (left <= right) - { - var middle = left + (right - left) / 2; - var entry = entries[middle]; - - if (srid < entry.MinSrid) - right = middle - 1; - else - if (srid > entry.MaxSrid) - left = middle + 1; - else - return entry.Authority; - } - - return null; - } -} \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs new file mode 100644 index 0000000000..a43300b6ef --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs @@ -0,0 +1,54 @@ +using System; + +namespace Npgsql.GeoJSON.Internal; + +struct CrsMapBuilder +{ + CrsMapEntry[] _overrides; + int _overriddenIndex; + int _wellKnownIndex; + + internal void Add(in CrsMapEntry entry) + { + var wellKnown = CrsMap.WellKnown[_wellKnownIndex]; + if (wellKnown.MinSrid == entry.MinSrid && + wellKnown.MaxSrid == entry.MaxSrid && + string.Equals(wellKnown.Authority, entry.Authority, StringComparison.Ordinal)) + { + _wellKnownIndex++; + return; + } + + if (wellKnown.MinSrid < entry.MinSrid) + { + do + _wellKnownIndex++; + while (CrsMap.WellKnown.Length < _wellKnownIndex && + CrsMap.WellKnown[_wellKnownIndex].MaxSrid < entry.MaxSrid); + AddCore(new CrsMapEntry(wellKnown.MinSrid, Math.Min(wellKnown.MaxSrid, entry.MinSrid - 1), null)); + } + + AddCore(entry); + } + + void AddCore(in CrsMapEntry entry) + { + var index = _overriddenIndex + 1; + if (_overrides == null) + _overrides = new CrsMapEntry[4]; + else + if (_overrides.Length == index) + Array.Resize(ref _overrides, _overrides.Length << 1); + + _overrides[_overriddenIndex] = entry; + _overriddenIndex = index; + } + + internal CrsMap Build() + { + if (_overrides != null && _overrides.Length < _overriddenIndex) + Array.Resize(ref _overrides, _overriddenIndex); + + return new CrsMap(_overrides); + } +} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs 
b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs new file mode 100644 index 0000000000..22c527f23a --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -0,0 +1,733 @@ +using System; +using System.Buffers.Binary; +using System.Collections.Concurrent; +using System.Collections.ObjectModel; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using GeoJSON.Net; +using GeoJSON.Net.CoordinateReferenceSystem; +using GeoJSON.Net.Geometry; +using Npgsql.Internal; + +namespace Npgsql.GeoJSON.Internal; + +sealed class GeoJSONConverter : PgStreamingConverter where T : IGeoJSONObject +{ + readonly ConcurrentDictionary _cachedCrs = new(); + readonly GeoJSONOptions _options; + readonly Func _getCrs; + + public GeoJSONConverter(GeoJSONOptions options, CrsMap crsMap) + { + _options = options; + _getCrs = GetCrs( + crsMap, + _cachedCrs, + crsType: _options & (GeoJSONOptions.ShortCRS | GeoJSONOptions.LongCRS) + ); + } + + bool BoundingBox => (_options & GeoJSONOptions.BoundingBox) != 0; + + public override T Read(PgReader reader) + => (T)GeoJSONConverter.Read(async: false, reader, BoundingBox ? new BoundingBoxBuilder() : null, _getCrs, CancellationToken.None).GetAwaiter().GetResult(); + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => (T)await GeoJSONConverter.Read(async: true, reader, BoundingBox ? new BoundingBoxBuilder() : null, _getCrs, cancellationToken).ConfigureAwait(false); + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => GeoJSONConverter.GetSize(context, value, ref writeState); + + public override void Write(PgWriter writer, T value) + => GeoJSONConverter.Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => GeoJSONConverter.Write(async: true, writer, value, CancellationToken.None); + + static Func GetCrs(CrsMap crsMap, ConcurrentDictionary cachedCrs, GeoJSONOptions crsType) + => srid => + { + if (crsType == GeoJSONOptions.None) + return null; + + return cachedCrs.GetOrAdd(srid, static (srid, state) => + { + var (crsMap, crsType) = state; + var authority = crsMap.GetAuthority(srid); + + return authority is null + ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") + : new NamedCRS(crsType == GeoJSONOptions.LongCRS + ? "urn:ogc:def:crs:" + authority + "::" + srid + : authority + ":" + srid); + }, (crsMap, crsType)); + }; +} + +static class GeoJSONConverter +{ + public static async ValueTask Read(bool async, PgReader reader, BoundingBoxBuilder? boundingBox, Func getCrs, CancellationToken cancellationToken) + { + var geometry = await Core(async, reader, boundingBox, getCrs, cancellationToken).ConfigureAwait(false); + geometry.BoundingBoxes = boundingBox?.Build(); + return geometry; + + static async ValueTask Core(bool async, PgReader reader, BoundingBoxBuilder? boundingbox, Func getCrs, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(SizeOfHeader)) + await reader.BufferData(async, SizeOfHeader, cancellationToken).ConfigureAwait(false); + + var littleEndian = reader.ReadByte() > 0; + var type = (EwkbGeometryType)ReadUInt32(littleEndian); + + GeoJSONObject geometry; + NamedCRS? 
crs = null; + + if (HasSrid(type)) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.BufferData(async, sizeof(int), cancellationToken).ConfigureAwait(false); + crs = getCrs(ReadInt32(littleEndian)); + } + + switch (type & EwkbGeometryType.BaseType) + { + case EwkbGeometryType.Point: + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + geometry = new Point(position); + break; + } + + case EwkbGeometryType.LineString: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var i = 0; i < coordinates.Length; ++i) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[i] = position; + } + geometry = new LineString(coordinates); + break; + } + + case EwkbGeometryType.Polygon: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var lines = new LineString[ReadInt32(littleEndian)]; + for (var i = 0; i < lines.Length; ++i) + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var j = 0; j < coordinates.Length; ++j) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[j] = position; + } + 
lines[i] = new LineString(coordinates); + } + geometry = new Polygon(lines); + break; + } + + case EwkbGeometryType.MultiPoint: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var points = new Point[ReadInt32(littleEndian)]; + for (var i = 0; i < points.Length; ++i) + { + if (SizeOfHeader + SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + if (async) + await reader.ConsumeAsync(SizeOfHeader, cancellationToken).ConfigureAwait(false); + else + reader.Consume(SizeOfHeader); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + points[i] = new Point(position); + } + geometry = new MultiPoint(points); + break; + } + + case EwkbGeometryType.MultiLineString: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var lines = new LineString[ReadInt32(littleEndian)]; + for (var i = 0; i < lines.Length; ++i) + { + if (reader.ShouldBuffer(SizeOfHeaderWithLength)) + await reader.BufferData(async, SizeOfHeaderWithLength, cancellationToken).ConfigureAwait(false); + + if (async) + await reader.ConsumeAsync(SizeOfHeader, cancellationToken).ConfigureAwait(false); + else + reader.Consume(SizeOfHeader); + + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var j = 0; j < coordinates.Length; ++j) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[j] = position; + } + lines[i] = new LineString(coordinates); + } + geometry = new MultiLineString(lines); + break; + } + + case EwkbGeometryType.MultiPolygon: + { + if (reader.ShouldBuffer(SizeOfLength)) + await 
reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var polygons = new Polygon[ReadInt32(littleEndian)]; + for (var i = 0; i < polygons.Length; ++i) + { + if (reader.ShouldBuffer(SizeOfHeaderWithLength)) + await reader.BufferData(async, SizeOfHeaderWithLength, cancellationToken).ConfigureAwait(false); + + if (async) + await reader.ConsumeAsync(SizeOfHeader, cancellationToken).ConfigureAwait(false); + else + reader.Consume(SizeOfHeader); + + var lines = new LineString[ReadInt32(littleEndian)]; + for (var j = 0; j < lines.Length; ++j) + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var k = 0; k < coordinates.Length; ++k) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[k] = position; + } + lines[j] = new LineString(coordinates); + } + polygons[i] = new Polygon(lines); + } + geometry = new MultiPolygon(polygons); + break; + } + + case EwkbGeometryType.GeometryCollection: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var elements = new IGeometryObject[ReadInt32(littleEndian)]; + for (var i = 0; i < elements.Length; ++i) + elements[i] = (IGeometryObject)await Core(async, reader, boundingbox, getCrs, cancellationToken).ConfigureAwait(false); + geometry = new GeometryCollection(elements); + break; + } + + default: + throw UnknownPostGisType(); + } + + geometry.CRS = crs; + return geometry; + + int ReadInt32(bool littleEndian) + => littleEndian ? BinaryPrimitives.ReverseEndianness(reader.ReadInt32()) : reader.ReadInt32(); + uint ReadUInt32(bool littleEndian) + => littleEndian ? 
BinaryPrimitives.ReverseEndianness(reader.ReadUInt32()) : reader.ReadUInt32(); + } + + static Position ReadPosition(PgReader reader, EwkbGeometryType type, bool littleEndian) + { + var position = new Position( + longitude: ReadDouble(littleEndian), + latitude: ReadDouble(littleEndian), + altitude: HasZ(type) ? reader.ReadDouble() : null); + if (HasM(type)) ReadDouble(littleEndian); + return position; + + double ReadDouble(bool littleEndian) + => littleEndian + ? BitConverter.Int64BitsToDouble(BinaryPrimitives.ReverseEndianness(BitConverter.DoubleToInt64Bits(reader.ReadDouble()))) + : reader.ReadDouble(); + } + } + + public static Size GetSize(SizeContext context, IGeoJSONObject value, ref object? writeState) + => value.Type switch + { + GeoJSONObjectType.Point => GetSize((Point)value), + GeoJSONObjectType.LineString => GetSize((LineString)value), + GeoJSONObjectType.Polygon => GetSize((Polygon)value), + GeoJSONObjectType.MultiPoint => GetSize((MultiPoint)value), + GeoJSONObjectType.MultiLineString => GetSize((MultiLineString)value), + GeoJSONObjectType.MultiPolygon => GetSize((MultiPolygon)value), + GeoJSONObjectType.GeometryCollection => GetSize(context, (GeometryCollection)value, ref writeState), + _ => throw UnknownPostGisType() + }; + + static bool NotValid(ReadOnlyCollection coordinates, out bool hasZ) + { + if (coordinates.Count == 0) + hasZ = false; + else + { + hasZ = HasZ(coordinates[0]); + for (var i = 1; i < coordinates.Count; ++i) + if (HasZ(coordinates[i]) != hasZ) return true; + } + return false; + } + + static Size GetSize(Point value) + { + var length = Size.Create(SizeOfHeader + SizeOfPoint(HasZ(value.Coordinates))); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + return length; + } + + static Size GetSize(LineString value) + { + var coordinates = value.Coordinates; + if (NotValid(coordinates, out var hasZ)) + throw AllOrNoneCoordinatesMustHaveZ(nameof(LineString)); + + var length = Size.Create(SizeOfHeaderWithLength + 
coordinates.Count * SizeOfPoint(hasZ)); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + return length; + } + + static Size GetSize(Polygon value) + { + var lines = value.Coordinates; + var length = Size.Create(SizeOfHeaderWithLength + SizeOfLength * lines.Count); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var hasZ = false; + for (var i = 0; i < lines.Count; ++i) + { + var coordinates = lines[i].Coordinates; + if (NotValid(coordinates, out var lineHasZ)) + throw AllOrNoneCoordinatesMustHaveZ(nameof(Polygon)); + + if (hasZ != lineHasZ) + { + if (i == 0) hasZ = lineHasZ; + else throw AllOrNoneCoordinatesMustHaveZ(nameof(LineString)); + } + + length = length.Combine(coordinates.Count * SizeOfPoint(hasZ)); + } + + return length; + } + + static Size GetSize(MultiPoint value) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var coordinates = value.Coordinates; + foreach (var t in coordinates) + length = length.Combine(GetSize(t)); + + return length; + } + + static Size GetSize(MultiLineString value) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var coordinates = value.Coordinates; + foreach (var t in coordinates) + length = length.Combine(GetSize(t)); + + return length; + } + + static Size GetSize(MultiPolygon value) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var coordinates = value.Coordinates; + foreach (var t in coordinates) + length = length.Combine(GetSize(t)); + + return length; + } + + static Size GetSize(SizeContext context, GeometryCollection value, ref object? 
writeState) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var geometries = value.Geometries; + foreach (var t in geometries) + length = length.Combine(GetSize(context, (IGeoJSONObject)t, ref writeState)); + + return length; + } + + public static ValueTask Write(bool async, PgWriter writer, IGeoJSONObject value, CancellationToken cancellationToken = default) + => value.Type switch + { + GeoJSONObjectType.Point => Write(async, writer, (Point)value, cancellationToken), + GeoJSONObjectType.LineString => Write(async, writer, (LineString)value, cancellationToken), + GeoJSONObjectType.Polygon => Write(async, writer, (Polygon)value, cancellationToken), + GeoJSONObjectType.MultiPoint => Write(async, writer, (MultiPoint)value, cancellationToken), + GeoJSONObjectType.MultiLineString => Write(async, writer, (MultiLineString)value, cancellationToken), + GeoJSONObjectType.MultiPolygon => Write(async, writer, (MultiPolygon)value, cancellationToken), + GeoJSONObjectType.GeometryCollection => Write(async, writer, (GeometryCollection)value, cancellationToken), + _ => throw UnknownPostGisType() + }; + + static async ValueTask Write(bool async, PgWriter writer, Point value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.Point; + var size = SizeOfHeader; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + + if (srid != 0) + writer.WriteInt32(srid); + + await WritePosition(async, writer, value.Coordinates, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, LineString value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.LineString; + var size = 
SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in coordinates) + await WritePosition(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, Polygon value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.Polygon; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var lines = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(lines.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in lines) + { + if (writer.ShouldFlush(SizeOfLength)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + var coordinates = t.Coordinates; + writer.WriteInt32(coordinates.Count); + foreach (var t1 in coordinates) + await WritePosition(async, writer, t1, cancellationToken).ConfigureAwait(false); + } + } + + static async ValueTask Write(bool async, PgWriter writer, MultiPoint value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.MultiPoint; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = 
value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in coordinates) + await Write(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, MultiLineString value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.MultiLineString; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in coordinates) + await Write(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, MultiPolygon value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.MultiPolygon; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + foreach (var t in coordinates) + await Write(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, GeometryCollection value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.GeometryCollection; + 
var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var geometries = value.Geometries; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(geometries.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in geometries) + await Write(async, writer, (IGeoJSONObject)t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask WritePosition(bool async, PgWriter writer, IPosition coordinate, CancellationToken cancellationToken) + { + var altitude = coordinate.Altitude; + if (SizeOfPoint(altitude.HasValue) is var size && writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteDouble(coordinate.Longitude); + writer.WriteDouble(coordinate.Latitude); + if (altitude.HasValue) + writer.WriteDouble(altitude.Value); + } + + static ValueTask BufferData(this PgReader reader, bool async, int byteCount, CancellationToken cancellationToken) + { + if (async) + return reader.BufferAsync(byteCount, cancellationToken); + + reader.Buffer(byteCount); + return new(); + } + + static ValueTask Flush(this PgWriter writer, bool async, CancellationToken cancellationToken) + { + if (async) + return writer.FlushAsync(cancellationToken); + + writer.Flush(); + return new(); + } + + static bool HasSrid(EwkbGeometryType type) + => (type & EwkbGeometryType.HasSrid) != 0; + + static bool HasZ(EwkbGeometryType type) + => (type & EwkbGeometryType.HasZ) != 0; + + static bool HasM(EwkbGeometryType type) + => (type & EwkbGeometryType.HasM) != 0; + + static bool HasZ(IPosition coordinates) + => coordinates.Altitude.HasValue; + + const int SizeOfLength = sizeof(int); + const int SizeOfHeader = sizeof(byte) + sizeof(EwkbGeometryType); + const int 
SizeOfHeaderWithLength = SizeOfHeader + SizeOfLength; + const int SizeOfPoint2D = 2 * sizeof(double); + const int SizeOfPoint3D = 3 * sizeof(double); + + static int SizeOfPoint(bool hasZ) + => hasZ ? SizeOfPoint3D : SizeOfPoint2D; + + static int SizeOfPoint(EwkbGeometryType type) + { + var size = SizeOfPoint2D; + if (HasZ(type)) + size += sizeof(double); + if (HasM(type)) + size += sizeof(double); + return size; + } + + static Exception UnknownPostGisType() + => throw new InvalidOperationException("Invalid PostGIS type"); + + static Exception AllOrNoneCoordinatesMustHaveZ(string typeName) + => new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName}"); + + static int GetSrid(ICRSObject crs) + { + if (crs is null or UnspecifiedCRS) + return 0; + + var namedCrs = crs as NamedCRS; + if (namedCrs == null) + throw new NotSupportedException("The LinkedCRS class isn't supported"); + + if (namedCrs.Properties.TryGetValue("name", out var value) && value != null) + { + var name = value.ToString()!; + if (string.Equals(name, "urn:ogc:def:crs:OGC::CRS84", StringComparison.Ordinal)) + return 4326; + + var index = name.LastIndexOf(':'); + if (index != -1 && int.TryParse(name.Substring(index + 1), out var srid)) + return srid; + + throw new FormatException("The specified CRS isn't properly named"); + } + + return 0; + } +} + +/// +/// Represents the identifier of the Well Known Binary representation of a geographical feature specified by the OGC. 
+/// http://portal.opengeospatial.org/files/?artifact_id=13227 Chapter 6.3.2.7 +/// +[Flags] +enum EwkbGeometryType : uint +{ + // Types + Point = 1, + LineString = 2, + Polygon = 3, + MultiPoint = 4, + MultiLineString = 5, + MultiPolygon = 6, + GeometryCollection = 7, + + // Masks + BaseType = Point | LineString | Polygon | MultiPoint | MultiLineString | MultiPolygon | GeometryCollection, + + // Flags + HasSrid = 0x20000000, + HasM = 0x40000000, + HasZ = 0x80000000 +} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs deleted file mode 100644 index 4c3c90b866..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs +++ /dev/null @@ -1,720 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.ObjectModel; -using System.Threading; -using System.Threading.Tasks; -using GeoJSON.Net; -using GeoJSON.Net.CoordinateReferenceSystem; -using GeoJSON.Net.Geometry; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.GeoJSON.Internal; - -sealed partial class GeoJsonHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler -{ - readonly GeoJSONOptions _options; - readonly CrsMap _crsMap; - readonly ConcurrentDictionary _cachedCrs = new(); - - internal GeoJsonHandler(PostgresType postgresType, GeoJSONOptions options, CrsMap crsMap) - : base(postgresType) - { - _options = options; - _crsMap = crsMap; - } - - GeoJSONOptions CrsType => _options & (GeoJSONOptions.ShortCRS | GeoJSONOptions.LongCRS); - - bool BoundingBox => (_options & GeoJSONOptions.BoundingBox) != 0; - - static bool HasSrid(EwkbGeometryType type) - => (type & EwkbGeometryType.HasSrid) != 0; - - static bool HasZ(EwkbGeometryType type) - => (type & EwkbGeometryType.HasZ) != 0; - 
- static bool HasM(EwkbGeometryType type) - => (type & EwkbGeometryType.HasM) != 0; - - static bool HasZ(IPosition coordinates) - => coordinates.Altitude.HasValue; - - const int SizeOfLength = sizeof(int); - const int SizeOfHeader = sizeof(byte) + sizeof(EwkbGeometryType); - const int SizeOfHeaderWithLength = SizeOfHeader + SizeOfLength; - const int SizeOfPoint2D = 2 * sizeof(double); - const int SizeOfPoint3D = 3 * sizeof(double); - - static int SizeOfPoint(bool hasZ) - => hasZ ? SizeOfPoint3D : SizeOfPoint2D; - - static int SizeOfPoint(EwkbGeometryType type) - { - var size = SizeOfPoint2D; - if (HasZ(type)) - size += sizeof(double); - if (HasM(type)) - size += sizeof(double); - return size; - } - - #region Throw - - static Exception UnknownPostGisType() - => throw new InvalidOperationException("Invalid PostGIS type"); - - static Exception AllOrNoneCoordiantesMustHaveZ(NpgsqlParameter? parameter, string typeName) - => parameter is null - ? new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName}") - : new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName} in the {parameter.ParameterName} parameter", parameter.ParameterName); - - #endregion - - #region Read - - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (Point)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (LineString)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => (Polygon)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (MultiPoint)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (MultiLineString)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (MultiPolygon)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (GeometryCollection)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (IGeometryObject)await ReadGeometry(buf, async); - - async ValueTask ReadGeometry(NpgsqlReadBuffer buf, bool async) - { - var boundingBox = BoundingBox ? new BoundingBoxBuilder() : null; - var geometry = await ReadGeometryCore(buf, async, boundingBox); - - geometry.BoundingBoxes = boundingBox?.Build(); - return geometry; - } - - async ValueTask ReadGeometryCore(NpgsqlReadBuffer buf, bool async, BoundingBoxBuilder? boundingBox) - { - await buf.Ensure(SizeOfHeader, async); - var littleEndian = buf.ReadByte() > 0; - var type = (EwkbGeometryType)buf.ReadUInt32(littleEndian); - - GeoJSONObject geometry; - NamedCRS? 
crs = null; - - if (HasSrid(type)) - { - await buf.Ensure(4, async); - crs = GetCrs(buf.ReadInt32(littleEndian)); - } - - switch (type & EwkbGeometryType.BaseType) - { - case EwkbGeometryType.Point: - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - geometry = new Point(position); - break; - } - - case EwkbGeometryType.LineString: - { - await buf.Ensure(SizeOfLength, async); - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < coordinates.Length; ++i) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[i] = position; - } - geometry = new LineString(coordinates); - break; - } - - case EwkbGeometryType.Polygon: - { - await buf.Ensure(SizeOfLength, async); - var lines = new LineString[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < lines.Length; ++i) - { - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var j = 0; j < coordinates.Length; ++j) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[j] = position; - } - lines[i] = new LineString(coordinates); - } - geometry = new Polygon(lines); - break; - } - - case EwkbGeometryType.MultiPoint: - { - await buf.Ensure(SizeOfLength, async); - var points = new Point[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < points.Length; ++i) - { - await buf.Ensure(SizeOfHeader + SizeOfPoint(type), async); - await buf.Skip(SizeOfHeader, async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - points[i] = new Point(position); - } - geometry = new MultiPoint(points); - break; - } - - case EwkbGeometryType.MultiLineString: - { - await buf.Ensure(SizeOfLength, async); - var lines = new LineString[buf.ReadInt32(littleEndian)]; - for (var 
i = 0; i < lines.Length; ++i) - { - await buf.Ensure(SizeOfHeaderWithLength, async); - await buf.Skip(SizeOfHeader, async); - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var j = 0; j < coordinates.Length; ++j) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[j] = position; - } - lines[i] = new LineString(coordinates); - } - geometry = new MultiLineString(lines); - break; - } - - case EwkbGeometryType.MultiPolygon: - { - await buf.Ensure(SizeOfLength, async); - var polygons = new Polygon[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < polygons.Length; ++i) - { - await buf.Ensure(SizeOfHeaderWithLength, async); - await buf.Skip(SizeOfHeader, async); - var lines = new LineString[buf.ReadInt32(littleEndian)]; - for (var j = 0; j < lines.Length; ++j) - { - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var k = 0; k < coordinates.Length; ++k) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[k] = position; - } - lines[j] = new LineString(coordinates); - } - polygons[i] = new Polygon(lines); - } - geometry = new MultiPolygon(polygons); - break; - } - - case EwkbGeometryType.GeometryCollection: - { - await buf.Ensure(SizeOfLength, async); - var elements = new IGeometryObject[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < elements.Length; ++i) - elements[i] = (IGeometryObject)await ReadGeometryCore(buf, async, boundingBox); - geometry = new GeometryCollection(elements); - break; - } - - default: - throw UnknownPostGisType(); - } - - geometry.CRS = crs; - return geometry; - } - - static Position ReadPosition(NpgsqlReadBuffer buf, EwkbGeometryType type, bool littleEndian) - { - var position = new Position( - longitude: buf.ReadDouble(littleEndian), - latitude: buf.ReadDouble(littleEndian), - altitude: HasZ(type) 
? buf.ReadDouble() : (double?)null); - if (HasM(type)) buf.ReadDouble(littleEndian); - return position; - } - - #endregion - - #region Write - - public override int ValidateAndGetLength(GeoJSONObject value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Type switch - { - GeoJSONObjectType.Point => ValidateAndGetLength((Point)value, ref lengthCache, parameter), - GeoJSONObjectType.LineString => ValidateAndGetLength((LineString)value, ref lengthCache, parameter), - GeoJSONObjectType.Polygon => ValidateAndGetLength((Polygon)value, ref lengthCache, parameter), - GeoJSONObjectType.MultiPoint => ValidateAndGetLength((MultiPoint)value, ref lengthCache, parameter), - GeoJSONObjectType.MultiLineString => ValidateAndGetLength((MultiLineString)value, ref lengthCache, parameter), - GeoJSONObjectType.MultiPolygon => ValidateAndGetLength((MultiPolygon)value, ref lengthCache, parameter), - GeoJSONObjectType.GeometryCollection => ValidateAndGetLength((GeometryCollection)value, ref lengthCache, parameter), - _ => throw UnknownPostGisType() - }; - - public int ValidateAndGetLength(Point value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeader + SizeOfPoint(HasZ(value.Coordinates)); - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - return length; - } - - public int ValidateAndGetLength(LineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var coordinates = value.Coordinates; - if (NotValid(coordinates, out var hasZ)) - throw AllOrNoneCoordiantesMustHaveZ(parameter, nameof(LineString)); - - var length = SizeOfHeaderWithLength + coordinates.Count * SizeOfPoint(hasZ); - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - return length; - } - - public int ValidateAndGetLength(Polygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - var lines = value.Coordinates; - var length = SizeOfHeaderWithLength + SizeOfLength * lines.Count; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var hasZ = false; - for (var i = 0; i < lines.Count; ++i) - { - var coordinates = lines[i].Coordinates; - if (NotValid(coordinates, out var lineHasZ)) - throw AllOrNoneCoordiantesMustHaveZ(parameter, nameof(Polygon)); - - if (hasZ != lineHasZ) - { - if (i == 0) hasZ = lineHasZ; - else throw AllOrNoneCoordiantesMustHaveZ(parameter, nameof(LineString)); - } - - length += coordinates.Count * SizeOfPoint(hasZ); - } - - return length; - } - - static bool NotValid(ReadOnlyCollection coordinates, out bool hasZ) - { - if (coordinates.Count == 0) - hasZ = false; - else - { - hasZ = HasZ(coordinates[0]); - for (var i = 1; i < coordinates.Count; ++i) - if (HasZ(coordinates[i]) != hasZ) return true; - } - return false; - } - - public int ValidateAndGetLength(MultiPoint value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var coordinates = value.Coordinates; - for (var i = 0; i < coordinates.Count; ++i) - length += ValidateAndGetLength(coordinates[i], ref lengthCache, parameter); - - return length; - } - - public int ValidateAndGetLength(MultiLineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var coordinates = value.Coordinates; - for (var i = 0; i < coordinates.Count; ++i) - length += ValidateAndGetLength(coordinates[i], ref lengthCache, parameter); - - return length; - } - - public int ValidateAndGetLength(MultiPolygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var coordinates = value.Coordinates; - for (var i = 0; i < coordinates.Count; ++i) - length += ValidateAndGetLength(coordinates[i], ref lengthCache, parameter); - - return length; - } - - public int ValidateAndGetLength(GeometryCollection value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var geometries = value.Geometries; - for (var i = 0; i < geometries.Count; ++i) - length += ValidateAndGetLength((GeoJSONObject)geometries[i], ref lengthCache, parameter); - - return length; - } - - int INpgsqlTypeHandler.ValidateAndGetLength(IGeoJSONObject value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((GeoJSONObject)value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(IGeometryObject value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((GeoJSONObject)value, ref lengthCache, parameter); - - public override Task Write(GeoJSONObject value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => value.Type switch - { - GeoJSONObjectType.Point => Write((Point)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.LineString => Write((LineString)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.Polygon => Write((Polygon)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.MultiPoint => Write((MultiPoint)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.MultiLineString => Write((MultiLineString)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.MultiPolygon => Write((MultiPolygon)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.GeometryCollection => Write((GeometryCollection)value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw UnknownPostGisType() - }; - - public async Task Write(Point value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.Point; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - - if (srid != 0) - buf.WriteInt32(srid); - - await WritePosition(value.Coordinates, buf, async, cancellationToken); - } - - public async Task Write(LineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.LineString; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < coordinates.Count; ++i) - await WritePosition(coordinates[i], buf, async, cancellationToken); - } - - public async Task Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.Polygon; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var lines = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(lines.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < lines.Count; ++i) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - var coordinates = lines[i].Coordinates; - buf.WriteInt32(coordinates.Count); - for (var j = 0; j < coordinates.Count; ++j) - await WritePosition(coordinates[j], buf, async, cancellationToken); - } - } - - public async Task Write(MultiPoint value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.MultiPoint; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < coordinates.Count; ++i) - await Write(coordinates[i], buf, lengthCache, parameter, async, cancellationToken); - } - - public async Task Write(MultiLineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.MultiLineString; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < coordinates.Count; ++i) - await Write(coordinates[i], buf, lengthCache, parameter, async, cancellationToken); - } - - public async Task Write(MultiPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.MultiPolygon; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - for (var i = 0; i < coordinates.Count; ++i) - await Write(coordinates[i], buf, lengthCache, parameter, async, cancellationToken); - } - - public async Task Write(GeometryCollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.GeometryCollection; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var geometries = value.Geometries; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(geometries.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < geometries.Count; ++i) - await Write((GeoJSONObject) geometries[i], buf, lengthCache, parameter, async, cancellationToken); - } - - Task INpgsqlTypeHandler.Write(IGeoJSONObject value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => Write((GeoJSONObject)value, buf, lengthCache, parameter, async, cancellationToken); - - Task INpgsqlTypeHandler.Write(IGeometryObject value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken) - => Write((GeoJSONObject)value, buf, lengthCache, parameter, async, cancellationToken); - - static async Task WritePosition(IPosition coordinate, NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - { - var altitude = coordinate.Altitude; - if (buf.WriteSpaceLeft < SizeOfPoint(altitude.HasValue)) - await buf.Flush(async, cancellationToken); - buf.WriteDouble(coordinate.Longitude); - buf.WriteDouble(coordinate.Latitude); - if (altitude.HasValue) - buf.WriteDouble(altitude.Value); - } - - #endregion - - #region Crs - - NamedCRS? GetCrs(int srid) - { - var crsType = CrsType; - if (crsType == GeoJSONOptions.None) - return null; - -#if NETSTANDARD2_0 - return _cachedCrs.GetOrAdd(srid, srid => - { - var authority = _crsMap.GetAuthority(srid); - - return authority is null - ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") - : new NamedCRS(crsType == GeoJSONOptions.LongCRS - ? "urn:ogc:def:crs:" + authority + "::" + srid - : authority + ":" + srid); - }); -#else - return _cachedCrs.GetOrAdd(srid, static (srid, me) => - { - var authority = me._crsMap.GetAuthority(srid); - - return authority is null - ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") - : new NamedCRS(me.CrsType == GeoJSONOptions.LongCRS - ? 
"urn:ogc:def:crs:" + authority + "::" + srid - : authority + ":" + srid); - }, this); -#endif - } - - static int GetSrid(ICRSObject crs) - { - if (crs == null || crs is UnspecifiedCRS) - return 0; - - var namedCrs = crs as NamedCRS; - if (namedCrs == null) - throw new NotSupportedException("The LinkedCRS class isn't supported"); - - if (namedCrs.Properties.TryGetValue("name", out var value) && value != null) - { - var name = value.ToString()!; - if (string.Equals(name, "urn:ogc:def:crs:OGC::CRS84", StringComparison.Ordinal)) - return 4326; - - var index = name.LastIndexOf(':'); - if (index != -1 && int.TryParse(name.Substring(index + 1), out var srid)) - return srid; - - throw new FormatException("The specified CRS isn't properly named"); - } - - return 0; - } - - #endregion -} - -/// -/// Represents the identifier of the Well Known Binary representation of a geographical feature specified by the OGC. -/// http://portal.opengeospatial.org/files/?artifact_id=13227 Chapter 6.3.2.7 -/// -[Flags] -enum EwkbGeometryType : uint -{ - // Types - Point = 1, - LineString = 2, - Polygon = 3, - MultiPoint = 4, - MultiLineString = 5, - MultiPolygon = 6, - GeometryCollection = 7, - - // Masks - BaseType = Point | LineString | Polygon | MultiPoint | MultiLineString | MultiPolygon | GeometryCollection, - - // Flags - HasSrid = 0x20000000, - HasM = 0x40000000, - HasZ = 0x80000000 -} \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs deleted file mode 100644 index 862dc80947..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs +++ /dev/null @@ -1,91 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Data; -using GeoJSON.Net; -using GeoJSON.Net.Geometry; -using Newtonsoft.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; 
-using NpgsqlTypes; - -namespace Npgsql.GeoJSON.Internal; - -public class GeoJSONTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly GeoJsonHandler? _geometryHandler, _geographyHandler; - readonly bool _geographyAsDefault; - - static readonly ConcurrentDictionary CRSMaps = new(); - - internal GeoJSONTypeHandlerResolver(NpgsqlConnector connector, GeoJSONOptions options, bool geographyAsDefault) - { - _databaseInfo = connector.DatabaseInfo; - _geographyAsDefault = geographyAsDefault; - - var crsMap = (options & (GeoJSONOptions.ShortCRS | GeoJSONOptions.LongCRS)) == GeoJSONOptions.None - ? default : CRSMaps.GetOrAdd(connector.Settings.ConnectionString, _ => - { - var builder = new CrsMapBuilder(); - using var cmd = connector.CreateCommand( - "SELECT min(srid), max(srid), auth_name " + - "FROM(SELECT srid, auth_name, srid - rank() OVER(ORDER BY srid) AS range " + - "FROM spatial_ref_sys) AS s GROUP BY range, auth_name ORDER BY 1;"); - cmd.AllResultTypesAreUnknown = true; - using var reader = cmd.ExecuteReader(); - - while (reader.Read()) - { - builder.Add(new CrsMapEntry( - int.Parse(reader.GetString(0)), - int.Parse(reader.GetString(1)), - reader.GetString(2))); - } - - return builder.Build(); - }); - - var (pgGeometryType, pgGeographyType) = (PgType("geometry"), PgType("geography")); - - if (pgGeometryType is not null) - _geometryHandler = new GeoJsonHandler(pgGeometryType, options, crsMap); - if (pgGeographyType is not null) - _geographyHandler = new GeoJsonHandler(pgGeographyType, options, crsMap); - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "geometry" => _geometryHandler, - "geography" => _geographyHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _geographyAsDefault) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? 
handler - : null; - - internal static string? ClrTypeToDataTypeName(Type type, bool geographyAsDefault) - => type.BaseType != typeof(GeoJSONObject) - ? null - : geographyAsDefault - ? "geography" - : "geometry"; - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "geometry" => new(NpgsqlDbType.Geometry, "geometry"), - "geography" => new(NpgsqlDbType.Geography, "geography"), - _ => null - }; - - PostgresType? PgType(string pgTypeName) => _databaseInfo.TryGetPostgresTypeByName(pgTypeName, out var pgType) ? pgType : null; -} \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs deleted file mode 100644 index 16d8ae54d9..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,24 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.TypeMapping; - -namespace Npgsql.GeoJSON.Internal; - -public class GeoJSONTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly GeoJSONOptions _options; - readonly bool _geographyAsDefault; - - public GeoJSONTypeHandlerResolverFactory(GeoJSONOptions options, bool geographyAsDefault) - => (_options, _geographyAsDefault) = (options, geographyAsDefault); - - public override TypeHandlerResolver Create(NpgsqlConnector connector) - => new GeoJSONTypeHandlerResolver(connector, _options, _geographyAsDefault); - - public override string? GetDataTypeNameByClrType(Type type) - => GeoJSONTypeHandlerResolver.ClrTypeToDataTypeName(type, _geographyAsDefault); - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => GeoJSONTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..f1b56000f2 --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs @@ -0,0 +1,92 @@ +using System; +using GeoJSON.Net; +using GeoJSON.Net.Geometry; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.GeoJSON.Internal; + +sealed class GeoJSONTypeInfoResolverFactory(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(options, geographyAsDefault, crsMap); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(options, geographyAsDefault, crsMap); + + class Resolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), options, geographyAsDefault, crsMap); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonOptions, + bool geographyAsDefault, CrsMap? crsMap) + { + crsMap ??= new CrsMap(CrsMap.WellKnown); + + var geometryMatchRequirement = !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; + var geographyMatchRequirement = geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; + + foreach (var dataTypeName in new[] { "geometry", "geography" }) + { + var matchRequirement = dataTypeName == "geometry" ? 
geometryMatchRequirement : geographyMatchRequirement; + + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + } + + return mappings; + } + } + + sealed class ArrayResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + : Resolver(options, geographyAsDefault, crsMap), IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { "geometry", "geography" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + return mappings; + } + } +} diff --git a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index bff8f6cede..136d4b5635 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -1,10 +1,10 @@ - + Yoh Deadfall;Shay Rojansky GeoJSON plugin for Npgsql, allowing mapping of PostGIS geometry types to GeoJSON types. npgsql;postgresql;postgres;postgis;geojson;spatial;ado;ado.net;database;sql - netstandard2.0 - net7.0 + net10.0 + $(NoWarn);NPG9001 diff --git a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs index c59b0b21c7..4984148f0f 100644 --- a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs +++ b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs @@ -1,4 +1,5 @@ -using Npgsql.GeoJSON.Internal; +using Npgsql.GeoJSON; +using Npgsql.GeoJSON.Internal; using Npgsql.TypeMapping; // ReSharper disable once CheckNamespace @@ -9,6 +10,7 @@ namespace Npgsql; /// public static class NpgsqlGeoJSONExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up GeoJSON mappings for the PostGIS types. /// @@ -17,7 +19,48 @@ public static class NpgsqlGeoJSONExtensions /// Specifies that the geography type is used for mapping by default. 
public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) { - mapper.AddTypeResolverFactory(new GeoJSONTypeHandlerResolverFactory(options, geographyAsDefault)); + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap: null)); return mapper; } -} \ No newline at end of file + + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. + /// + /// Sets up GeoJSON mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific) + /// A custom crs map that might contain more or less entries than the default well-known crs map. + /// Options to use when constructing objects. + /// Specifies that the geography type is used for mapping by default. + public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, CrsMap crsMap, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) + { + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap)); + return mapper; + } + + /// + /// Sets up GeoJSON mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific) + /// Options to use when constructing objects. + /// Specifies that the geography type is used for mapping by default. + public static TMapper UseGeoJson(this TMapper mapper, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) + where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap: null)); + return mapper; + } + + /// + /// Sets up GeoJSON mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific) + /// A custom crs map that might contain more or less entries than the default well-known crs map. 
+ /// Options to use when constructing objects. + /// Specifies that the geography type is used for mapping by default. + public static TMapper UseGeoJson(this TMapper mapper, CrsMap crsMap, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) + where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap)); + return mapper; + } +} diff --git a/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs b/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs index 1a340b1a15..f30dbdd96f 100644 --- a/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs +++ b/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt index 13ced34dc9..2281dba39d 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt @@ -1,8 +1,15 @@ -#nullable enable +#nullable enable +Npgsql.GeoJSON.CrsMap +Npgsql.GeoJSON.CrsMapExtensions Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.BoundingBox = 1 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.LongCRS = 4 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.None = 0 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.ShortCRS = 2 -> Npgsql.GeoJSONOptions Npgsql.NpgsqlGeoJSONExtensions -static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file +static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMap(this Npgsql.NpgsqlDataSource! dataSource) -> Npgsql.GeoJSON.CrsMap! +static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMapAsync(this Npgsql.NpgsqlDataSource! dataSource) -> System.Threading.Tasks.Task! 
+static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper diff --git a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt index ab058de62d..7dc5c58110 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt @@ -1 +1 @@ -#nullable enable +#nullable enable diff --git a/src/Npgsql.Json.NET/Internal/JsonHandler.cs b/src/Npgsql.Json.NET/Internal/JsonHandler.cs deleted file mode 100644 index 56a6683a5f..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonHandler.cs +++ /dev/null @@ -1,100 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Json.NET.Internal; - -class JsonHandler : Npgsql.Internal.TypeHandlers.JsonHandler -{ - readonly JsonSerializerSettings _settings; - - public JsonHandler(PostgresType postgresType, NpgsqlConnector connector, JsonSerializerSettings settings) - : base(postgresType, connector.TextEncoding, isJsonb: false) => _settings = 
settings; - - protected override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[])) - { - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - // JSON.NET returns null if no JSON content was found. This means null may get returned even if T is a non-nullable reference - // type (for value types, an exception will be thrown). - return JsonConvert.DeserializeObject(await base.Read(buf, len, async, fieldDescription), _settings)!; - } - - protected override int ValidateAndGetLengthCustom([DisallowNull] T2 value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - } - - var serialized = JsonConvert.SerializeObject(value, _settings); - if (parameter != null) - parameter.ConvertedValue = serialized; - return base.ValidateAndGetLengthCustom(serialized, ref lengthCache, parameter); - } - - protected override Task WriteWithLengthCustom([DisallowNull] T2 value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - } - - // User POCO, read serialized representation from the validation phase - var serialized = parameter?.ConvertedValue != null - ? 
(string)parameter.ConvertedValue - : JsonConvert.SerializeObject(value, _settings); - return base.WriteWithLengthCustom(serialized, buf, lengthCache, parameter, async, cancellationToken); - } - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.ValidateObjectAndGetLength(value, ref lengthCache, parameter); - } - - return ValidateAndGetLength(value, ref lengthCache, parameter); - } - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null || - value is DBNull || - value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - } - - return WriteWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - } -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs new file mode 100644 index 0000000000..b365f65a53 --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs @@ -0,0 +1,110 @@ +using System; +using System.Globalization; +using System.IO; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; +using Npgsql.Internal; +using JsonSerializer = Newtonsoft.Json.JsonSerializer; + +namespace Npgsql.Json.NET.Internal; + +sealed class JsonNetJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) : PgStreamingConverter +{ + public override T? 
Read(PgReader reader) + => (T?)JsonNetJsonConverter.Read(async: false, jsonb, reader, typeof(T), settings, textEncoding, CancellationToken.None).GetAwaiter().GetResult(); + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => (T?)await JsonNetJsonConverter.Read(async: true, jsonb, reader, typeof(T), settings, textEncoding, cancellationToken).ConfigureAwait(false); + + public override Size GetSize(SizeContext context, T? value, ref object? writeState) + => JsonNetJsonConverter.GetSize(jsonb, context, typeof(T), settings, textEncoding, value, ref writeState); + + public override void Write(PgWriter writer, T? value) + => JsonNetJsonConverter.Write(jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default) + => JsonNetJsonConverter.Write(jsonb, async: true, writer, cancellationToken); +} + +// Split out to avoid unnecessary code duplication. 
+static class JsonNetJsonConverter +{ + public const byte JsonbProtocolVersion = 1; + + public static async ValueTask Read(bool async, bool jsonb, PgReader reader, Type type, JsonSerializerSettings settings, Encoding encoding, CancellationToken cancellationToken) + { + if (jsonb) + { + if (reader.ShouldBuffer(sizeof(byte))) + { + if (async) + await reader.BufferAsync(sizeof(byte), cancellationToken).ConfigureAwait(false); + else + reader.Buffer(sizeof(byte)); + } + var version = reader.ReadByte(); + if (version != JsonbProtocolVersion) + throw new InvalidCastException($"Unknown jsonb wire format version {version}"); + } + + using var stream = reader.GetStream(); + var mem = new MemoryStream(); + if (async) + await stream.CopyToAsync(mem, Math.Min((int)stream.Length, 81920), cancellationToken).ConfigureAwait(false); + else + stream.CopyTo(mem); + mem.Position = 0; + var jsonSerializer = JsonSerializer.CreateDefault(settings); + using var textReader = new JsonTextReader(new StreamReader(mem, encoding)); + return jsonSerializer.Deserialize(textReader, type); + } + + public static Size GetSize(bool jsonb, SizeContext context, Type type, JsonSerializerSettings settings, Encoding encoding, object? value, ref object? writeState) + { + var jsonSerializer = JsonSerializer.CreateDefault(settings); + var sb = new StringBuilder(256); + var sw = new StringWriter(sb, CultureInfo.InvariantCulture); + using (var jsonWriter = new JsonTextWriter(sw)) + { + jsonWriter.Formatting = jsonSerializer.Formatting; + + jsonSerializer.Serialize(jsonWriter, value, type); + } + + var str = sw.ToString(); + var bytes = encoding.GetBytes(str); + writeState = bytes; + return bytes.Length + (jsonb ? 
sizeof(byte) : 0); + } + + public static async ValueTask Write(bool jsonb, bool async, PgWriter writer, CancellationToken cancellationToken) + { + if (jsonb) + { + if (writer.ShouldFlush(sizeof(byte))) + { + if (async) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + else + writer.Flush(); + } + writer.WriteByte(JsonbProtocolVersion); + } + + ArraySegment buffer; + switch (writer.Current.WriteState) + { + case byte[] bytes: + buffer = new ArraySegment(bytes); + break; + default: + throw new InvalidCastException($"Invalid state {writer.Current.WriteState?.GetType().FullName}."); + } + + if (async) + await writer.WriteBytesAsync(buffer.AsMemory(), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(buffer.AsSpan()); + } +} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..8899eddb60 --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs @@ -0,0 +1,120 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using Newtonsoft.Json; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Json.NET.Internal; + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class JsonNetPocoTypeInfoResolverFactory( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null, + JsonSerializerSettings? serializerSettings = null) + : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(jsonbClrTypes, jsonClrTypes, serializerSettings); + public override IPgTypeInfoResolver? 
CreateArrayResolver() => new ArrayResolver(jsonbClrTypes, jsonClrTypes, serializerSettings); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + class Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + : DynamicTypeInfoResolver, IPgTypeInfoResolver + { + readonly JsonSerializerSettings _serializerSettings = serializerSettings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), jsonbClrTypes ?? [], jsonClrTypes ?? [], _serializerSettings); + + const string JsonDataTypeName = "pg_catalog.json"; + const string JsonbDataTypeName = "pg_catalog.jsonb"; + + // Capture default settings during construction. + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerSettings serializerSettings) + { + AddUserMappings(mappings, jsonb: true, jsonbClrTypes, serializerSettings); + AddUserMappings(mappings, jsonb: false, jsonClrTypes, serializerSettings); + return mappings; + + static void AddUserMappings(TypeInfoMappingCollection mappings, bool jsonb, Type[] clrTypes, JsonSerializerSettings serializerSettings) + { + var dynamicMappings = CreateCollection(); + var dataTypeName = jsonb ? 
JsonbDataTypeName : JsonDataTypeName; + foreach (var jsonType in clrTypes) + { + dynamicMappings.AddMapping(jsonType, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateConverter(mapping.Type, jsonb, options.TextEncoding, serializerSettings))); + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. + if (type is null || type == typeof(object) || PgSerializerOptions.IsWellKnownTextType(type) + || dataTypeName != JsonbDataTypeName && dataTypeName != JsonDataTypeName) + return null; + + var matchedType = Nullable.GetUnderlyingType(type) ?? type; + + return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => + { + var jsonb = dataTypeName == JsonbDataTypeName; + return mapping.CreateInfo(options, + CreateConverter(mapping.Type, jsonb, options.TextEncoding, _serializerSettings)); + }); + } + + static PgConverter CreateConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) + => (PgConverter)Activator.CreateInstance( + typeof(JsonNetJsonConverter<>).MakeGenericType(valueType), + jsonb, + textEncoding, + settings + )!; + } + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + sealed class ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? 
serializerSettings = null) + : Resolver(jsonbClrTypes, jsonClrTypes, serializerSettings), IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), base.Mappings); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + { + if (baseMappings.Items.Count == 0) + return mappings; + + var dynamicMappings = CreateCollection(baseMappings); + foreach (var mapping in baseMappings.Items) + { + // Always handle Nullable mappings as part of the underlying type. + if (Nullable.GetUnderlyingType(mapping.Type) is not null) + continue; + dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + + return mappings; + } + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? 
elementType, elementDataTypeName) + : null; + } +} + diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs deleted file mode 100644 index 25ce0d5e92..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Data; -using Newtonsoft.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Json.NET.Internal; - -public class JsonNetTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly JsonbHandler _jsonbHandler; - readonly JsonHandler _jsonHandler; - readonly Dictionary _dataTypeNamesByClrType; - - internal JsonNetTypeHandlerResolver( - NpgsqlConnector connector, - Dictionary dataClrTypeNamesDataTypeNamesByClrClrType, - JsonSerializerSettings settings) - { - _databaseInfo = connector.DatabaseInfo; - - _jsonbHandler = new JsonbHandler(PgType("jsonb"), connector, settings); - _jsonHandler = new JsonHandler(PgType("json"), connector, settings); - - _dataTypeNamesByClrType = dataClrTypeNamesDataTypeNamesByClrClrType; - } - - public NpgsqlTypeHandler? ResolveNpgsqlDbType(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch - { - NpgsqlDbType.Jsonb => _jsonbHandler, - NpgsqlDbType.Json => _jsonHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "jsonb" => _jsonbHandler, - "json" => _jsonHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _dataTypeNamesByClrType) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - internal static string? 
ClrTypeToDataTypeName(Type type, Dictionary clrTypes) - => clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb"), - "json" => new(NpgsqlDbType.Json, "json"), - _ => null - }; - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs deleted file mode 100644 index 9a047fff9e..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,46 +0,0 @@ -using System; -using System.Collections.Generic; -using Newtonsoft.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.TypeMapping; - -namespace Npgsql.Json.NET.Internal; - -public class JsonNetTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly Type[] _jsonbClrTypes; - readonly Type[] _jsonClrTypes; - readonly JsonSerializerSettings _settings; - readonly Dictionary _byType; - - public JsonNetTypeHandlerResolverFactory( - Type[]? jsonbClrTypes, - Type[]? jsonClrTypes, - JsonSerializerSettings? settings) - { - _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); - _jsonClrTypes = jsonClrTypes ?? Array.Empty(); - _settings = settings ?? 
new JsonSerializerSettings(); - - _byType = new(); - - if (jsonbClrTypes is not null) - foreach (var type in jsonbClrTypes) - _byType[type] = "jsonb"; - - if (jsonClrTypes is not null) - foreach (var type in jsonClrTypes) - _byType[type] = "json"; - } - - public override TypeHandlerResolver Create(NpgsqlConnector connector) - => new JsonNetTypeHandlerResolver(connector, _byType, _settings); - - public override string? GetDataTypeNameByClrType(Type type) - => JsonNetTypeHandlerResolver.ClrTypeToDataTypeName(type, _byType); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => JsonNetTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); - -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..be2e0a3ba7 --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs @@ -0,0 +1,67 @@ +using System; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Json.NET.Internal; + +sealed class JsonNetTypeInfoResolverFactory(JsonSerializerSettings? settings = null) : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(settings); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(settings); + + class Resolver(JsonSerializerSettings? settings = null) : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + readonly JsonSerializerSettings _serializerSettings = settings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _serializerSettings); + + // Capture default settings during construction. 
+ + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, JsonSerializerSettings settings) + { + // Jsonb is the first default for JToken etc. + foreach (var dataTypeName in new[] { "jsonb", "json" }) + { + var jsonb = dataTypeName == "jsonb"; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + } + + return mappings; + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + } + + sealed class ArrayResolver(JsonSerializerSettings? settings = null) : Resolver(settings), IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { "jsonb", "json" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + return mappings; + } + } +} + diff --git a/src/Npgsql.Json.NET/Internal/JsonbHandler.cs b/src/Npgsql.Json.NET/Internal/JsonbHandler.cs deleted file mode 100644 index e9b88e3d34..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonbHandler.cs +++ /dev/null @@ -1,100 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Json.NET.Internal; - -class JsonbHandler : Npgsql.Internal.TypeHandlers.JsonHandler -{ - readonly JsonSerializerSettings _settings; - - public JsonbHandler(PostgresType postgresType, NpgsqlConnector connector, JsonSerializerSettings settings) - : base(postgresType, connector.TextEncoding, isJsonb: true) => _settings = settings; - - protected override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[])) - { - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - // JSON.NET returns null if no JSON content was found. This means null may get returned even if T is a non-nullable reference - // type (for value types, an exception will be thrown). 
- return JsonConvert.DeserializeObject(await base.Read(buf, len, async, fieldDescription), _settings)!; - } - - protected override int ValidateAndGetLengthCustom([DisallowNull] T2 value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - } - - var serialized = JsonConvert.SerializeObject(value, _settings); - if (parameter != null) - parameter.ConvertedValue = serialized; - return base.ValidateAndGetLengthCustom(serialized, ref lengthCache, parameter); - } - - protected override Task WriteWithLengthCustom([DisallowNull] T2 value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - } - - // User POCO, read serialized representation from the validation phase - var serialized = parameter?.ConvertedValue != null - ? (string)parameter.ConvertedValue - : JsonConvert.SerializeObject(value, _settings); - return base.WriteWithLengthCustom(serialized, buf, lengthCache, parameter, async, cancellationToken); - } - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.ValidateObjectAndGetLength(value, ref lengthCache, parameter); - } - - return ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - } - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null || - value is DBNull || - value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - } - - return WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - } -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index 94d5b9f4d7..32c6cb212f 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -1,10 +1,12 @@ - + Shay Rojansky Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. npgsql;postgresql;json;postgres;ado;ado.net;database;sql - netstandard2.0 - net7.0 + net10.0 + enable + false + $(NoWarn);NPG9001 diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index 06f0f2f661..e427a3a0fb 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -1,4 +1,5 @@ -using System; +using System; +using System.Diagnostics.CodeAnalysis; using Npgsql.TypeMapping; using NpgsqlTypes; using Newtonsoft.Json; @@ -12,20 +13,55 @@ namespace Npgsql; /// public static class NpgsqlJsonNetExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. 
/// /// Sets up JSON.NET mappings for the PostgreSQL json and jsonb types. /// - /// The type mapper to set up (global or connection-specific) - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify NpgsqlDbType.Jsonb) - /// A list of CLR types to map to PostgreSQL json (no need to specify NpgsqlDbType.Json) - /// Optional settings to customize JSON serialization + /// The type mapper to set up. + /// Optional settings to customize JSON serialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static INpgsqlTypeMapper UseJsonNet( this INpgsqlTypeMapper mapper, + JsonSerializerSettings? settings = null, Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null, - JsonSerializerSettings? settings = null) + Type[]? jsonClrTypes = null) { - mapper.AddTypeResolverFactory(new JsonNetTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); + // Reverse order + mapper.AddTypeInfoResolverFactory(new JsonNetPocoTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); + mapper.AddTypeInfoResolverFactory(new JsonNetTypeInfoResolverFactory(settings)); return mapper; } -} \ No newline at end of file + + /// + /// Sets up JSON.NET mappings for the PostgreSQL json and jsonb types. + /// + /// The type mapper to set up. + /// Optional settings to customize JSON serialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). 
+ /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public static TMapper UseJsonNet( + this TMapper mapper, + JsonSerializerSettings? settings = null, + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + where TMapper : INpgsqlTypeMapper + { + // Reverse order + mapper.AddTypeInfoResolverFactory(new JsonNetPocoTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); + mapper.AddTypeInfoResolverFactory(new JsonNetTypeInfoResolverFactory(settings)); + return mapper; + } +} diff --git a/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs b/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs index 1a340b1a15..f30dbdd96f 100644 --- a/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs +++ b/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt index 48ddf42ce5..f83708dc0e 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt @@ -1,3 +1,4 @@ -#nullable enable +#nullable enable Npgsql.NpgsqlJsonNetExtensions -static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null, Newtonsoft.Json.JsonSerializerSettings? settings = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file +static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? 
jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this TMapper mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> TMapper diff --git a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt index ab058de62d..7dc5c58110 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt @@ -1 +1 @@ -#nullable enable +#nullable enable diff --git a/src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs b/src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs deleted file mode 100644 index 1a340b1a15..0000000000 --- a/src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,5 +0,0 @@ -using System.Runtime.CompilerServices; - -#if NET5_0_OR_GREATER -[module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs new file mode 100644 index 0000000000..45597e7059 --- /dev/null +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs @@ -0,0 +1,79 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using NetTopologySuite.Geometries; +using NetTopologySuite.IO; +using Npgsql.Internal; + +namespace Npgsql.NetTopologySuite.Internal; + +sealed class NetTopologySuiteConverter : PgStreamingConverter + where T : Geometry +{ + readonly PostGisReader _reader; + readonly PostGisWriter _writer; + + internal NetTopologySuiteConverter(PostGisReader reader, PostGisWriter writer) + => (_reader, _writer) = (reader, writer); + + public override T Read(PgReader reader) + => (T)_reader.Read(reader.GetStream()); + + // PostGisReader/PostGisWriter doesn't support async + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => new(Read(reader)); 
+ + public override Size GetSize(SizeContext context, T value, ref object? writeState) + { + var lengthStream = new LengthStream(); + lengthStream.SetLength(0); + _writer.Write(value, lengthStream); + return (int)lengthStream.Length; + } + + public override void Write(PgWriter writer, T value) + => _writer.Write(value, writer.GetStream(allowMixedIO: true)); + + // PostGisReader/PostGisWriter doesn't support async + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + { + Write(writer, value); + return default; + } + + sealed class LengthStream : Stream + { + long _length; + + public override bool CanRead => false; + + public override bool CanSeek => false; + + public override bool CanWrite => true; + + public override long Length => _length; + + public override long Position + { + get => _length; + set => throw new NotSupportedException(); + } + + public override void Flush() + { + } + + public override int Read(byte[] buffer, int offset, int count) + => throw new NotSupportedException(); + + public override long Seek(long offset, SeekOrigin origin) + => throw new NotSupportedException(); + + public override void SetLength(long value) + => _length = value; + + public override void Write(byte[] buffer, int offset, int count) + => _length += count; + } +} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs deleted file mode 100644 index b1cb3783e1..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs +++ /dev/null @@ -1,168 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using NetTopologySuite.Geometries; -using NetTopologySuite.IO; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.NetTopologySuite.Internal; - -partial class NetTopologySuiteHandler : 
NpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler -{ - readonly PostGisReader _reader; - readonly PostGisWriter _writer; - readonly LengthStream _lengthStream = new(); - - internal NetTopologySuiteHandler(PostgresType postgresType, PostGisReader reader, PostGisWriter writer) - : base(postgresType) - { - _reader = reader; - _writer = writer; - } - - #region Read - - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask ReadCore(NpgsqlReadBuffer buf, int len) - where T : Geometry - => new((T)_reader.Read(buf.GetStream(len, false))); - - #endregion - - #region ValidateAndGetLength - - public override int ValidateAndGetLength(Geometry value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLengthCore(value); - - int INpgsqlTypeHandler.ValidateAndGetLength(Point value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(LineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(Polygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(MultiPoint value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(MultiLineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(MultiPolygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(GeometryCollection value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int ValidateAndGetLengthCore(Geometry value) - { - _lengthStream.SetLength(0); - _writer.Write(value, _lengthStream); - return (int)_lengthStream.Length; - } - - sealed class LengthStream : Stream - { - long _length; - - public override bool CanRead => false; - - public override bool CanSeek => false; - - public override bool CanWrite => true; - - public override long Length => _length; - - public override long Position - { - get => _length; - set => throw new NotSupportedException(); - } - - public override void Flush() - { } - - public override int Read(byte[] buffer, int offset, int count) - => throw new NotSupportedException(); - - public override long Seek(long offset, SeekOrigin origin) - => throw new NotSupportedException(); - - public override void SetLength(long value) - => _length = value; - - public override void Write(byte[] buffer, int offset, int count) - => _length += count; - } - - #endregion - - #region Write - - public override Task Write(Geometry value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(Point value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(LineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(MultiPoint value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToke) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(MultiLineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(MultiPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(GeometryCollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task WriteCore(Geometry value, NpgsqlWriteBuffer buf) - { - _writer.Write(value, buf.GetStream()); - return Task.CompletedTask; - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs deleted file mode 100644 index f327e93eb6..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs +++ /dev/null @@ -1,73 +0,0 @@ -using System; -using System.Data; -using NetTopologySuite.Geometries; -using NetTopologySuite.IO; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.NetTopologySuite.Internal; - -public class NetTopologySuiteTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly bool _geographyAsDefault; - - readonly NetTopologySuiteHandler? 
_geometryHandler, _geographyHandler; - - internal NetTopologySuiteTypeHandlerResolver( - NpgsqlConnector connector, - CoordinateSequenceFactory coordinateSequenceFactory, - PrecisionModel precisionModel, - Ordinates handleOrdinates, - bool geographyAsDefault) - { - _databaseInfo = connector.DatabaseInfo; - _geographyAsDefault = geographyAsDefault; - - var (pgGeometryType, pgGeographyType) = (PgType("geometry"), PgType("geography")); - - var reader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); - var writer = new PostGisWriter(); - - if (pgGeometryType is not null) - _geometryHandler = new NetTopologySuiteHandler(pgGeometryType, reader, writer); - if (pgGeographyType is not null) - _geographyHandler = new NetTopologySuiteHandler(pgGeographyType, reader, writer); - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "geometry" => _geometryHandler, - "geography" => _geographyHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _geographyAsDefault) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - internal static string? ClrTypeToDataTypeName(Type type, bool geographyAsDefault) - => type != typeof(Geometry) && type.BaseType != typeof(Geometry) && type.BaseType != typeof(GeometryCollection) - ? null - : geographyAsDefault - ? "geography" - : "geometry"; - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "geometry" => new(NpgsqlDbType.Geometry, "geometry"), - "geography" => new(NpgsqlDbType.Geography, "geography"), - _ => null - }; - - PostgresType? PgType(string pgTypeName) => _databaseInfo.TryGetPostgresTypeByName(pgTypeName, out var pgType) ? 
pgType : null; -} \ No newline at end of file diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs deleted file mode 100644 index a758e8ac02..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,39 +0,0 @@ -using System; -using System.Data; -using NetTopologySuite; -using NetTopologySuite.Geometries; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.TypeMapping; - -namespace Npgsql.NetTopologySuite.Internal; - -public class NetTopologySuiteTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly CoordinateSequenceFactory _coordinateSequenceFactory; - readonly PrecisionModel _precisionModel; - readonly Ordinates _handleOrdinates; - readonly bool _geographyAsDefault; - - public NetTopologySuiteTypeHandlerResolverFactory( - CoordinateSequenceFactory? coordinateSequenceFactory, - PrecisionModel? precisionModel, - Ordinates handleOrdinates, - bool geographyAsDefault) - { - _coordinateSequenceFactory = coordinateSequenceFactory ?? NtsGeometryServices.Instance.DefaultCoordinateSequenceFactory;; - _precisionModel = precisionModel ?? NtsGeometryServices.Instance.DefaultPrecisionModel; - _handleOrdinates = handleOrdinates == Ordinates.None ? _coordinateSequenceFactory.Ordinates : handleOrdinates; - _geographyAsDefault = geographyAsDefault; - } - - public override TypeHandlerResolver Create(NpgsqlConnector connector) - => new NetTopologySuiteTypeHandlerResolver(connector, _coordinateSequenceFactory, _precisionModel, _handleOrdinates, - _geographyAsDefault); - - public override string? GetDataTypeNameByClrType(Type type) - => NetTopologySuiteTypeHandlerResolver.ClrTypeToDataTypeName(type, _geographyAsDefault); - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => NetTopologySuiteTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..2012490fb5 --- /dev/null +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs @@ -0,0 +1,112 @@ +using System; +using NetTopologySuite; +using NetTopologySuite.Geometries; +using NetTopologySuite.IO; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.NetTopologySuite.Internal; + +sealed class NetTopologySuiteTypeInfoResolverFactory( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault); + + class Resolver : IPgTypeInfoResolver + { + readonly PostGisReader _gisReader; + readonly PostGisWriter _gisWriter; + protected readonly bool _geographyAsDefault; + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _gisReader, _gisWriter, _geographyAsDefault); + + public Resolver( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + { + coordinateSequenceFactory ??= NtsGeometryServices.Instance.DefaultCoordinateSequenceFactory; + precisionModel ??= NtsGeometryServices.Instance.DefaultPrecisionModel; + handleOrdinates = handleOrdinates == Ordinates.None ? 
coordinateSequenceFactory.Ordinates : handleOrdinates; + + _geographyAsDefault = geographyAsDefault; + _gisReader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); + _gisWriter = new PostGisWriter + { + HandleOrdinates = handleOrdinates + }; + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, + bool geographyAsDefault) + { + foreach (var dataTypeName in geographyAsDefault ? ["geography", "geometry"] : new[] { "geometry", "geography" }) + { + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + isDefault: true); + + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + } + + return mappings; + } + } + + 
sealed class ArrayResolver( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + : Resolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault), IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), _geographyAsDefault); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, bool geographyAsDefault) + { + foreach (var dataTypeName in geographyAsDefault ? ["geography", "geometry"] : new[] { "geometry", "geography" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + return mappings; + } + } +} diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index 82a1efa1e6..cce977ba3c 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -1,20 +1,17 @@ - + Shay Rojansky;Yoh Deadfall NetTopologySuite plugin for Npgsql, allowing mapping of PostGIS geometry types to NetTopologySuite types. 
npgsql;postgresql;postgres;postgis;spatial;nettopologysuite;nts;ado;ado.net;database;sql README.md - netstandard2.0 - net7.0 + net10.0 $(NoWarn);NU5104 + $(NoWarn);NPG9001 - - - @@ -24,6 +21,6 @@ - + diff --git a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs index a867fea349..ea6af4bcdf 100644 --- a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs +++ b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs @@ -1,4 +1,4 @@ -using NetTopologySuite.Geometries; +using NetTopologySuite.Geometries; using Npgsql.NetTopologySuite.Internal; using Npgsql.TypeMapping; @@ -10,6 +10,7 @@ namespace Npgsql; /// public static class NpgsqlNetTopologySuiteExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up NetTopologySuite mappings for the PostGIS types. /// @@ -27,9 +28,29 @@ public static INpgsqlTypeMapper UseNetTopologySuite( Ordinates handleOrdinates = Ordinates.None, bool geographyAsDefault = false) { - mapper.AddTypeResolverFactory( - new NetTopologySuiteTypeHandlerResolverFactory( - coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); + mapper.AddTypeInfoResolverFactory(new NetTopologySuiteTypeInfoResolverFactory(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); return mapper; } -} \ No newline at end of file + + /// + /// Sets up NetTopologySuite mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific). + /// The factory which knows how to build a particular implementation of ICoordinateSequence from an array of Coordinates. + /// Specifies the grid of allowable points. + /// Specifies the ordinates which will be handled. Not specified ordinates will be ignored. + /// If is specified, an actual value will be taken from + /// the property of . 
+ /// Specifies that the geography type is used for mapping by default. + public static TMapper UseNetTopologySuite( + this TMapper mapper, + CoordinateSequenceFactory? coordinateSequenceFactory = null, + PrecisionModel? precisionModel = null, + Ordinates handleOrdinates = Ordinates.None, + bool geographyAsDefault = false) + where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new NetTopologySuiteTypeInfoResolverFactory(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); + return mapper; + } +} diff --git a/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs b/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs index 1a340b1a15..f30dbdd96f 100644 --- a/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt b/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt index a9ca3382e6..d1e505b58e 100644 --- a/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt +++ b/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt @@ -1,3 +1,4 @@ -#nullable enable +#nullable enable Npgsql.NpgsqlNetTopologySuiteExtensions static Npgsql.NpgsqlNetTopologySuiteExtensions.UseNetTopologySuite(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, NetTopologySuite.Geometries.CoordinateSequenceFactory? coordinateSequenceFactory = null, NetTopologySuite.Geometries.PrecisionModel? precisionModel = null, NetTopologySuite.Geometries.Ordinates handleOrdinates = NetTopologySuite.Geometries.Ordinates.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlNetTopologySuiteExtensions.UseNetTopologySuite(this TMapper mapper, NetTopologySuite.Geometries.CoordinateSequenceFactory? 
coordinateSequenceFactory = null, NetTopologySuite.Geometries.PrecisionModel? precisionModel = null, NetTopologySuite.Geometries.Ordinates handleOrdinates = NetTopologySuite.Geometries.Ordinates.None, bool geographyAsDefault = false) -> TMapper diff --git a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt index 5f282702bb..7dc5c58110 100644 --- a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt @@ -1 +1 @@ - \ No newline at end of file +#nullable enable diff --git a/src/Npgsql.NetTopologySuite/README.md b/src/Npgsql.NetTopologySuite/README.md index ad2f46289b..c38f46c10b 100644 --- a/src/Npgsql.NetTopologySuite/README.md +++ b/src/Npgsql.NetTopologySuite/README.md @@ -2,15 +2,19 @@ Npgsql is the open source .NET data provider for PostgreSQL. It allows you to co This package is an Npgsql plugin which allows you to interact with spatial data provided by the PostgreSQL [PostGIS extension](https://postgis.net); PostGIS is a mature, standard extension considered to provide top-of-the-line database spatial features. On the .NET side, the plugin adds support for the types from the [NetTopologySuite library](https://github.com/NetTopologySuite/NetTopologySuite), allowing you to read and write them directly to PostgreSQL. -To use the NetTopologySuite plugin, simply add a dependency on this package and set it up at program startup: +To use the NetTopologySuite plugin, add a dependency on this package and create a NpgsqlDataSource. 
```csharp -NpgsqlConnection.GlobalTypeMapper.UseNetTopologySuite(); -``` +using Npgsql; +using NetTopologySuite.Geometries; -Once this is done, you can simply use NetTopologySuite types when interacting with PostgreSQL: +var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString); + +dataSourceBuilder.UseNetTopologySuite(); + +var dataSource = dataSourceBuilder.Build(); +var conn = await dataSource.OpenConnectionAsync(); -```csharp var point = new Point(new Coordinate(1d, 1d)); conn.ExecuteNonQuery("CREATE TEMP TABLE data (geom GEOMETRY)"); using (var cmd = new NpgsqlCommand("INSERT INTO data (geom) VALUES (@p)", conn)) diff --git a/src/Npgsql.NodaTime/Internal/DateHandler.cs b/src/Npgsql.NodaTime/Internal/DateHandler.cs deleted file mode 100644 index 9ae07b040a..0000000000 --- a/src/Npgsql.NodaTime/Internal/DateHandler.cs +++ /dev/null @@ -1,91 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; -using BclDateHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.DateHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class DateHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - readonly BclDateHandler _bclHandler; - - internal DateHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclDateHandler(postgresType); - - public override LocalDate Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadInt32() switch - { - int.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDate.MaxIsoValue, - int.MinValue => DisableDateTimeInfinityConversions - ? 
throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDate.MinIsoValue, - var value => new LocalDate().PlusDays(value + 730119) - }; - - public override int ValidateAndGetLength(LocalDate value, NpgsqlParameter? parameter) - => 4; - - public override void Write(LocalDate value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == LocalDate.MaxIsoValue) - { - buf.WriteInt32(int.MaxValue); - return; - } - if (value == LocalDate.MinIsoValue) - { - buf.WriteInt32(int.MinValue); - return; - } - } - - var totalDaysSinceEra = Period.Between(default, value, PeriodUnits.Days).Days; - buf.WriteInt32(totalDaysSinceEra - 730119); - } - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(int value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - -#if NET6_0_OR_GREATER - DateOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - public int ValidateAndGetLength(DateOnly value, NpgsqlParameter? 
parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - public void Write(DateOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); -#endif - - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new DateRangeHandler(pgRangeType, this); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs new file mode 100644 index 0000000000..b75f95d659 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using NodaTime; +using Npgsql.Internal; +using NpgsqlTypes; + +namespace Npgsql.NodaTime.Internal; + +public class DateIntervalConverter(PgConverter> rangeConverter, bool dateTimeInfinityConversions) + : PgStreamingConverter +{ + public override DateInterval Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + var range = async + ? await rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : rangeConverter.Read(reader); + + if (range.IsEmpty) + throw new InvalidCastException("Cannot read an empty range as a NodaTime DateInterval."); + + var upperBound = range.UpperBound; + + if (upperBound != LocalDate.MaxIsoValue || !dateTimeInfinityConversions) + upperBound -= Period.FromDays(1); + + return new(range.LowerBound, upperBound); + } + + public override Size GetSize(SizeContext context, DateInterval value, ref object? 
writeState) + => rangeConverter.GetSize(context, new NpgsqlRange(value.Start, value.End), ref writeState); + + public override void Write(PgWriter writer, DateInterval value) + => rangeConverter.Write(writer, new NpgsqlRange(value.Start, value.End)); + + public override ValueTask WriteAsync(PgWriter writer, DateInterval value, CancellationToken cancellationToken = default) + => rangeConverter.WriteAsync(writer, new NpgsqlRange(value.Start, value.End), cancellationToken); +} diff --git a/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs b/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs deleted file mode 100644 index 050a2f492a..0000000000 --- a/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs +++ /dev/null @@ -1,121 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NodaTime.Internal; - -public partial class DateMultirangeHandler : MultirangeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler> -{ - readonly INpgsqlTypeHandler _dateIntervalHandler; - - public DateMultirangeHandler(PostgresMultirangeType multirangePostgresType, DateRangeHandler rangeHandler) - : base(multirangePostgresType, rangeHandler) - => _dateIntervalHandler = rangeHandler; - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval[]); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval[]); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - async ValueTask INpgsqlTypeHandler.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new DateInterval[numRanges]; - - for (var i = 0; i < multirange.Length; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange[i] = await _dateIntervalHandler.Read(buf, rangeLen, async, fieldDescription); - } - - return multirange; - } - - async ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new List(numRanges); - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange.Add(await _dateIntervalHandler.Read(buf, rangeLen, async, fieldDescription)); - } - - return multirange; - } - - public int ValidateAndGetLength(DateInterval[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - public int ValidateAndGetLength(List value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - int ValidateAndGetLengthCore(IList value, ref NpgsqlLengthCache? lengthCache) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var sum = 4 + 4 * value.Count; - for (var i = 0; i < value.Count; i++) - sum += _dateIntervalHandler.ValidateAndGetLength(value[i], ref lengthCache, parameter: null); - - return lengthCache!.Set(sum); - } - - public async Task Write( - DateInterval[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Length); - - for (var i = 0; i < value.Length; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - public async Task Write( - List value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Count); - - for (var i = 0; i < value.Count; i++) - { - var interval = value[i]; - await RangeHandler.WriteWithLength( - new NpgsqlRange(interval.Start, interval.End), buf, lengthCache, parameter: null, async, cancellationToken); - } - } -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs b/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs deleted file mode 100644 index cd24f191ce..0000000000 --- a/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs +++ /dev/null @@ -1,70 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -public partial class DateRangeHandler : RangeHandler, INpgsqlTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlTypeHandler> -#endif -{ - public DateRangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType, subtypeHandler) - { - } - - public override Type GetFieldType(FieldDescription? 
fieldDescription = null) => typeof(DateInterval); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - var range = await Read(buf, len, async, fieldDescription); - - var upperBound = range.UpperBound; - - if (DisableDateTimeInfinityConversions || upperBound != LocalDate.MaxIsoValue) - upperBound -= Period.FromDays(1); - - return new(range.LowerBound, upperBound); - } - - public int ValidateAndGetLength(DateInterval value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(new NpgsqlRange(value.Start, value.End), ref lengthCache, parameter); - - public Task Write( - DateInterval value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - => WriteRange(new NpgsqlRange(value.Start, value.End), buf, lengthCache, parameter, async, cancellationToken); - -#if NET6_0_OR_GREATER - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public Task Write( - NpgsqlRange value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); -#endif -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/DurationConverter.cs b/src/Npgsql.NodaTime/Internal/DurationConverter.cs new file mode 100644 index 0000000000..940ef29464 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/DurationConverter.cs @@ -0,0 +1,42 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.NodaTime.Properties; + +namespace Npgsql.NodaTime.Internal; + +sealed class DurationConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int)); + return format is DataFormat.Binary; + } + + protected override Duration ReadCore(PgReader reader) + { + var microsecondsInDay = reader.ReadInt64(); + var days = reader.ReadInt32(); + var totalMonths = reader.ReadInt32(); + + if (totalMonths != 0) + throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadIntervalWithMonthsAsDuration); + + return Duration.FromDays(days) + Duration.FromNanoseconds(microsecondsInDay * 1000); + } + + protected override void WriteCore(PgWriter writer, Duration value) + { + const long microsecondsPerSecond = 1_000_000; + + // Note that the end result must be long + // see #3438 + var microsecondsInDay = + (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * + microsecondsPerSecond + value.SubsecondNanoseconds / 1000); // Take the microseconds, discard the nanosecond remainder + + writer.WriteInt64(microsecondsInDay); + writer.WriteInt32(value.Days); // days + writer.WriteInt32(0); // months + } +} diff --git a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs new file mode 100644 index 
0000000000..3c877f6119 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs @@ -0,0 +1,61 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using NodaTime; +using Npgsql.Internal; +using NpgsqlTypes; + +namespace Npgsql.NodaTime.Internal; + +sealed class IntervalConverter(PgConverter> rangeConverter, bool dateTimeInfinityConversions) : PgStreamingConverter +{ + public override Interval Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + var range = async + ? await rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : rangeConverter.Read(reader); + + if (range.IsEmpty) + throw new InvalidCastException("Cannot read an empty range as a NodaTime Interval."); + + // NodaTime Interval includes the start instant and excludes the end instant. + Instant? start = range.LowerBoundInfinite + ? null + : range.LowerBoundIsInclusive + ? range.LowerBound + : range.LowerBound + Duration.Epsilon; + // For ranges with element types with infinity values (datetime, date etc.) an + // inclusive lower/upper bound causes their -/+ infinity (respectively) to fall within the range. + // If those values are returned for such a range postgres will not mark the affected bound as infinite accordingly. + // This is documented in https://www.postgresql.org/docs/current/rangetypes.html#RANGETYPES-INFINITE + // As NodaTime uses an exclusive upper bound we must consider this case as being another form of infinity (null). + Instant? end = range.UpperBoundInfinite || (dateTimeInfinityConversions && range.UpperBoundIsInclusive && range.UpperBound == Instant.MaxValue) + ? 
null + : range.UpperBoundIsInclusive + ? range.UpperBound + Duration.Epsilon + : range.UpperBound; + + return new(start, end); + } + + public override Size GetSize(SizeContext context, Interval value, ref object? writeState) + => rangeConverter.GetSize(context, IntervalToNpgsqlRange(value), ref writeState); + + public override void Write(PgWriter writer, Interval value) + => rangeConverter.Write(writer, IntervalToNpgsqlRange(value)); + + public override ValueTask WriteAsync(PgWriter writer, Interval value, CancellationToken cancellationToken = default) + => rangeConverter.WriteAsync(writer, IntervalToNpgsqlRange(value), cancellationToken); + + static NpgsqlRange IntervalToNpgsqlRange(Interval interval) + => new( + interval.HasStart ? interval.Start : default, true, !interval.HasStart, + interval.HasEnd ? interval.End : default, false, !interval.HasEnd); +} diff --git a/src/Npgsql.NodaTime/Internal/IntervalHandler.cs b/src/Npgsql.NodaTime/Internal/IntervalHandler.cs deleted file mode 100644 index 4e9305a20b..0000000000 --- a/src/Npgsql.NodaTime/Internal/IntervalHandler.cs +++ /dev/null @@ -1,106 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using BclIntervalHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.IntervalHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class IntervalHandler : - NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler -{ - readonly BclIntervalHandler _bclHandler; - - internal IntervalHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclIntervalHandler(postgresType); - - public override Period Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - { - var microsecondsInDay = buf.ReadInt64(); - var days = buf.ReadInt32(); - var totalMonths = buf.ReadInt32(); - - // NodaTime will normalize most things (i.e. nanoseconds to milliseconds, seconds...) - // but it will not normalize months to years. - var months = totalMonths % 12; - var years = totalMonths / 12; - - return new PeriodBuilder - { - Nanoseconds = microsecondsInDay * 1000, - Days = days, - Months = months, - Years = years - }.Build().Normalize(); - } - - public override int ValidateAndGetLength(Period value, NpgsqlParameter? parameter) - => 16; - - public override void Write(Period value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - // Note that the end result must be long - // see #3438 - var microsecondsInDay = - (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * NodaConstants.MillisecondsPerSecond + value.Milliseconds) * 1000 + - value.Nanoseconds / 1000; // Take the microseconds, discard the nanosecond remainder - - buf.WriteInt64(microsecondsInDay); - buf.WriteInt32(value.Weeks * 7 + value.Days); // days - buf.WriteInt32(value.Years * 12 + value.Months); // months - } - - Duration INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var microsecondsInDay = buf.ReadInt64(); - var days = buf.ReadInt32(); - var totalMonths = buf.ReadInt32(); - - if (totalMonths != 0) - throw new NpgsqlException("Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead."); - - return Duration.FromDays(days) + Duration.FromNanoseconds(microsecondsInDay * 1000); - } - - public int ValidateAndGetLength(Duration value, NpgsqlParameter? parameter) => 16; - - public void Write(Duration value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - const long microsecondsPerSecond = 1_000_000; - - // Note that the end result must be long - // see #3438 - var microsecondsInDay = - (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * - microsecondsPerSecond + value.SubsecondNanoseconds / 1000); // Take the microseconds, discard the nanosecond remainder - - buf.WriteInt64(microsecondsInDay); - buf.WriteInt32(value.Days); // days - buf.WriteInt32(0); // months - } - - TimeSpan INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - NpgsqlInterval INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(NpgsqlInterval value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(NpgsqlInterval value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/LegacyConverters.cs b/src/Npgsql.NodaTime/Internal/LegacyConverters.cs new file mode 100644 index 0000000000..c0b4b82268 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/LegacyConverters.cs @@ -0,0 +1,62 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +sealed class LegacyTimestampTzZonedDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) + : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override ZonedDateTime ReadCore(PgReader reader) + { + var instant = DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions); + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); + + return instant.InZone(dateTimeZone); + } + + protected override void WriteCore(PgWriter writer, ZonedDateTime value) + { + var instant = value.ToInstant(); + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new ArgumentException("Infinity values not supported for timestamp with time zone"); + + writer.WriteInt64(EncodeInstant(instant, dateTimeInfinityConversions)); + } +} + +sealed class LegacyTimestampTzOffsetDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) + : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is 
DataFormat.Binary; + } + + protected override OffsetDateTime ReadCore(PgReader reader) + { + var instant = DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions); + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); + + return instant.InZone(dateTimeZone).ToOffsetDateTime(); + } + + protected override void WriteCore(PgWriter writer, OffsetDateTime value) + { + var instant = value.ToInstant(); + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new ArgumentException("Infinity values not supported for timestamp with time zone"); + + writer.WriteInt64(EncodeInstant(instant, true)); + } +} diff --git a/src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs b/src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs deleted file mode 100644 index ee2ba1a130..0000000000 --- a/src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs +++ /dev/null @@ -1,64 +0,0 @@ -using System; -using System.Diagnostics; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimestampHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class LegacyTimestampHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimestampHandler _bclHandler; - - internal LegacyTimestampHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimestampHandler(postgresType); - - #region Read - - public override Instant Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => TimestampTzHandler.ReadInstant(buf); - - LocalDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => TimestampHandler.ReadLocalDateTime(buf); - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(Instant value, NpgsqlParameter? parameter) - => 8; - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(LocalDateTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(Instant value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => TimestampTzHandler.WriteInstant(value, buf); - - void INpgsqlSimpleTypeHandler.Write(LocalDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => TimestampHandler.WriteLocalDateTime(value, buf); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs b/src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs deleted file mode 100644 index c299193343..0000000000 --- a/src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs +++ /dev/null @@ -1,121 +0,0 @@ -using System; -using NodaTime; -using NodaTime.TimeZones; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimestampTzHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampTzHandler; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class LegacyTimestampTzHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly IDateTimeZoneProvider _dateTimeZoneProvider; - readonly TimestampTzHandler _wrappedHandler; - - public LegacyTimestampTzHandler(PostgresType postgresType) - : base(postgresType) - { - _dateTimeZoneProvider = DateTimeZoneProviders.Tzdb; - _wrappedHandler = new TimestampTzHandler(postgresType); - } - - #region Read - - public override Instant Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => _wrappedHandler.Read(buf, len, fieldDescription); - - ZonedDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - { - try - { - var instant = Read(buf, len, fieldDescription); - - if (!DisableDateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) - throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - - return instant.InZone(_dateTimeZoneProvider[buf.Connection.Timezone]); - } - catch (Exception e) when ( - string.Equals(buf.Connection.Timezone, "localtime", StringComparison.OrdinalIgnoreCase) && - (e is TimeZoneNotFoundException || e is DateTimeZoneNotFoundException)) - { - throw new TimeZoneNotFoundException( - "The special PostgreSQL timezone 'localtime' is not supported when reading values of type 'timestamp with time zone'. " + - "Please specify a real timezone in 'postgresql.conf' on the server, or set the 'PGTZ' environment variable on the client.", - e); - } - } - - OffsetDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)this).Read(buf, len, fieldDescription).ToOffsetDateTime(); - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _wrappedHandler.Read(buf, len, fieldDescription); - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _wrappedHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _wrappedHandler.Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(Instant value, NpgsqlParameter? parameter) - => 8; - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(ZonedDateTime value, NpgsqlParameter? parameter) - => 8; - - public int ValidateAndGetLength(OffsetDateTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(Instant value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => _wrappedHandler.Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(ZonedDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var instant = value.ToInstant(); - - if (!DisableDateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) - throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - - _wrappedHandler.Write(instant, buf, parameter); - } - - public void Write(OffsetDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var instant = value.ToInstant(); - - if (!DisableDateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) - throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - - _wrappedHandler.Write(instant, buf, parameter); - } - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs b/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs new file mode 100644 index 0000000000..ffaa6e8d45 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs @@ -0,0 +1,47 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.NodaTime.Properties; + +namespace Npgsql.NodaTime.Internal; + +sealed class LocalDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override LocalDate ReadCore(PgReader reader) + => reader.ReadInt32() switch + { + int.MaxValue => dateTimeInfinityConversions + ? LocalDate.MaxIsoValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + int.MinValue => dateTimeInfinityConversions + ? 
LocalDate.MinIsoValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + var value => new LocalDate().PlusDays(value + 730119) + }; + + protected override void WriteCore(PgWriter writer, LocalDate value) + { + if (dateTimeInfinityConversions) + { + if (value == LocalDate.MaxIsoValue) + { + writer.WriteInt32(int.MaxValue); + return; + } + if (value == LocalDate.MinIsoValue) + { + writer.WriteInt32(int.MinValue); + return; + } + } + + var totalDaysSinceEra = Period.Between(default, value, PeriodUnits.Days).Days; + writer.WriteInt32(totalDaysSinceEra - 730119); + } +} diff --git a/src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs b/src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs new file mode 100644 index 0000000000..5849f45dfc --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs @@ -0,0 +1,20 @@ +using NodaTime; +using Npgsql.Internal; + +namespace Npgsql.NodaTime.Internal; + +sealed class LocalTimeConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + // PostgreSQL time resolution == 1 microsecond == 10 ticks + protected override LocalTime ReadCore(PgReader reader) + => LocalTime.FromTicksSinceMidnight(reader.ReadInt64() * 10); + + protected override void WriteCore(PgWriter writer, LocalTime value) + => writer.WriteInt64(value.TickOfDay / 10); +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs deleted file mode 100644 index 3d51c7f82c..0000000000 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs +++ /dev/null @@ -1,228 +0,0 @@ -using System; -using System.Collections.Generic; -using NodaTime; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using 
Npgsql.PostgresTypes; -using NpgsqlTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -public class NodaTimeTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - - readonly NpgsqlTypeHandler _timestampHandler; - readonly NpgsqlTypeHandler _timestampTzHandler; - readonly DateHandler _dateHandler; - readonly TimeHandler _timeHandler; - readonly TimeTzHandler _timeTzHandler; - readonly IntervalHandler _intervalHandler; - - TimestampTzRangeHandler? _timestampTzRangeHandler; - DateRangeHandler? _dateRangeHandler; - DateMultirangeHandler? _dateMultirangeHandler; - TimestampTzMultirangeHandler? _timestampTzMultirangeHandler; - - NpgsqlTypeHandler? _timestampTzRangeArray; - NpgsqlTypeHandler? _dateRangeArray; - - readonly ArrayNullabilityMode _arrayNullabilityMode; - - internal NodaTimeTypeHandlerResolver(NpgsqlConnector connector) - { - _databaseInfo = connector.DatabaseInfo; - - _timestampHandler = LegacyTimestampBehavior - ? new LegacyTimestampHandler(PgType("timestamp without time zone")) - : new TimestampHandler(PgType("timestamp without time zone")); - _timestampTzHandler = LegacyTimestampBehavior - ? new LegacyTimestampTzHandler(PgType("timestamp with time zone")) - : new TimestampTzHandler(PgType("timestamp with time zone")); - _dateHandler = new DateHandler(PgType("date")); - _timeHandler = new TimeHandler(PgType("time without time zone")); - _timeTzHandler = new TimeTzHandler(PgType("time with time zone")); - _intervalHandler = new IntervalHandler(PgType("interval")); - - // Note that the range handlers are absent on some pseudo-PostgreSQL databases (e.g. CockroachDB), and multirange types - // were only introduced in PG14. So we resolve these lazily. - - _arrayNullabilityMode = connector.Settings.ArrayNullabilityMode; - } - - public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) - => typeName switch - { - "timestamp" or "timestamp without time zone" => _timestampHandler, - "timestamptz" or "timestamp with time zone" => _timestampTzHandler, - "date" => _dateHandler, - "time without time zone" => _timeHandler, - "time with time zone" => _timeTzHandler, - "interval" => _intervalHandler, - - "tstzrange" => TsTzRange(), - "daterange" => DateRange(), - "tstzmultirange" => TsTzMultirange(), - "datemultirange" => DateMultirange(), - - "tstzrange[]" => TsTzRangeArray(), - "daterange[]" => DateRangeArray(), - - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) - { - // This method only ever gets called for value types, and relies on the JIT specializing the method for T by eliding all the - // type checks below. - - if (typeof(T) == typeof(Instant)) - return LegacyTimestampBehavior ? 
_timestampHandler : _timestampTzHandler; - - if (typeof(T) == typeof(LocalDateTime)) - return _timestampHandler; - if (typeof(T) == typeof(ZonedDateTime)) - return _timestampTzHandler; - if (typeof(T) == typeof(OffsetDateTime)) - return _timestampTzHandler; - if (typeof(T) == typeof(LocalDate)) - return _dateHandler; - if (typeof(T) == typeof(LocalTime)) - return _timeHandler; - if (typeof(T) == typeof(OffsetTime)) - return _timeTzHandler; - if (typeof(T) == typeof(Period)) - return _intervalHandler; - if (typeof(T) == typeof(Duration)) - return _intervalHandler; - - if (typeof(T) == typeof(Interval)) - return _timestampTzRangeHandler; - if (typeof(T) == typeof(NpgsqlRange)) - return _timestampTzRangeHandler; - if (typeof(T) == typeof(NpgsqlRange)) - return _timestampTzRangeHandler; - if (typeof(T) == typeof(NpgsqlRange)) - return _timestampTzRangeHandler; - - // Note that DateInterval is a reference type, so not included in this method - if (typeof(T) == typeof(NpgsqlRange)) - return _dateRangeHandler; - - return null; - } - - internal static string? ClrTypeToDataTypeName(Type type) - { - if (type == typeof(Instant)) - return LegacyTimestampBehavior ? 
"timestamp without time zone" : "timestamp with time zone"; - - if (type == typeof(LocalDateTime)) - return "timestamp without time zone"; - if (type == typeof(ZonedDateTime) || type == typeof(OffsetDateTime)) - return "timestamp with time zone"; - if (type == typeof(LocalDate)) - return "date"; - if (type == typeof(LocalTime)) - return "time without time zone"; - if (type == typeof(OffsetTime)) - return "time with time zone"; - if (type == typeof(Period) || type == typeof(Duration)) - return "interval"; - - // Ranges - if (type == typeof(NpgsqlRange)) - return "tsrange"; - - if (type == typeof(Interval) || - type == typeof(NpgsqlRange) || - type == typeof(NpgsqlRange) || - type == typeof(NpgsqlRange)) - { - return "tstzrange"; - } - - if (type == typeof(DateInterval) || type == typeof(NpgsqlRange)) - return "daterange"; - - // Multiranges - if (type == typeof(NpgsqlRange[]) || type == typeof(List>)) - return "tsmultirange"; - - if (type == typeof(Interval[]) || - type == typeof(List) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>)) - { - return "tstzmultirange"; - } - if (type == typeof(DateInterval[]) || - type == typeof(List) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>)) - { - return "datemultirange"; - } - - return null; - } - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "timestamp" or "timestamp without time zone" => new(NpgsqlDbType.Timestamp, "timestamp without time zone"), - "timestamptz" or "timestamp with time zone" => new(NpgsqlDbType.TimestampTz, "timestamp with time zone"), - "date" => new(NpgsqlDbType.Date, "date"), - "time without time zone" => new(NpgsqlDbType.Time, "time without time zone"), - "time with time zone" => new(NpgsqlDbType.TimeTz, "time with time zone"), - "interval" => new(NpgsqlDbType.Interval, "interval"), - - "tsrange" => new(NpgsqlDbType.TimestampRange, "tsrange"), - "tstzrange" => new(NpgsqlDbType.TimestampTzRange, "tstzrange"), - "daterange" => new(NpgsqlDbType.DateRange, "daterange"), - - "tsmultirange" => new(NpgsqlDbType.TimestampMultirange, "tsmultirange"), - "tstzmultirange" => new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange"), - "datemultirange" => new(NpgsqlDbType.DateMultirange, "datemultirange"), - - _ => null - }; - - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); - - TimestampTzRangeHandler TsTzRange() - => _timestampTzRangeHandler ??= new TimestampTzRangeHandler(PgType("tstzrange"), _timestampTzHandler); - - DateRangeHandler DateRange() - => _dateRangeHandler ??= new DateRangeHandler(PgType("daterange"), _dateHandler); - - NpgsqlTypeHandler TsTzMultirange() - => _timestampTzMultirangeHandler ??= - new TimestampTzMultirangeHandler((PostgresMultirangeType)PgType("tstzmultirange"), TsTzRange()); - - NpgsqlTypeHandler DateMultirange() - => _dateMultirangeHandler ??= new DateMultirangeHandler((PostgresMultirangeType)PgType("datemultirange"), DateRange()); - - NpgsqlTypeHandler TsTzRangeArray() - => _timestampTzRangeArray ??= - new ArrayHandler((PostgresArrayType)PgType("tstzrange[]"), TsTzRange(), _arrayNullabilityMode); - - NpgsqlTypeHandler DateRangeArray() - => _dateRangeArray ??= - new ArrayHandler((PostgresArrayType)PgType("daterange[]"), DateRange(), 
_arrayNullabilityMode); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs deleted file mode 100644 index c0eeb6709c..0000000000 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,17 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; - -namespace Npgsql.NodaTime.Internal; - -public class NodaTimeTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(NpgsqlConnector connector) - => new NodaTimeTypeHandlerResolver(connector); - - public override string? GetDataTypeNameByClrType(Type type) - => NodaTimeTypeHandlerResolver.ClrTypeToDataTypeName(type); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => NodaTimeTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs new file mode 100644 index 0000000000..fdd8d4c78f --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs @@ -0,0 +1,149 @@ +using System; +using System.Collections.Generic; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.NodaTime.Internal; + +sealed partial class NodaTimeTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver? CreateMultirangeResolver() => new MultirangeResolver(); + public override IPgTypeInfoResolver? 
CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); + + class MultirangeResolver : IPgTypeInfoResolver + { + protected static DataTypeName DateMultirangeDataTypeName => new("pg_catalog.datemultirange"); + protected static DataTypeName TimestampTzMultirangeDataTypeName => new("pg_catalog.tstzmultirange"); + protected static DataTypeName TimestampMultirangeDataTypeName => new("pg_catalog.tsmultirange"); + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzmultirange + mappings.AddType(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddType>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options.EnableDateTimeInfinityConversions), options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), 
options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + + // tsmultirange + mappings.AddType[]>(TimestampMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(TimestampMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + + // datemultirange + mappings.AddType(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + 
options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddType>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions), options))); + mappings.AddType[]>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + + return mappings; + } + } + + sealed class MultirangeArrayResolver : MultirangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzmultirange + mappings.AddArrayType(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + + // tsmultirange + mappings.AddArrayType[]>(TimestampMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampMultirangeDataTypeName); + + // datemultirange + mappings.AddArrayType(DateMultirangeDataTypeName); + mappings.AddArrayType>(DateMultirangeDataTypeName); + mappings.AddArrayType[]>(DateMultirangeDataTypeName); + mappings.AddArrayType>>(DateMultirangeDataTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs new file mode 100644 index 0000000000..8958f88846 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs @@ -0,0 +1,93 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.NodaTime.Internal; + +sealed partial class NodaTimeTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver? CreateRangeResolver() => new RangeResolver(); + public override IPgTypeInfoResolver? 
CreateRangeArrayResolver() => new RangeArrayResolver(); + + class RangeResolver : IPgTypeInfoResolver + { + protected static DataTypeName DateRangeDataTypeName => new("pg_catalog.daterange"); + protected static DataTypeName TimestampTzRangeDataTypeName => new("pg_catalog.tstzrange"); + protected static DataTypeName TimestampRangeDataTypeName => new("pg_catalog.tsrange"); + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzrange + mappings.AddStructType(TimestampTzRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options.EnableDateTimeInfinityConversions)), + isDefault: true); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); + + // tsrange + mappings.AddStructType>(TimestampRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + + // 
daterange + mappings.AddType(DateRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType>(DateRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options))); + + return mappings; + } + } + + sealed class RangeArrayResolver : RangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzrange + mappings.AddStructArrayType(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + + // tsrange + mappings.AddStructArrayType>(TimestampRangeDataTypeName); + + // daterange + mappings.AddArrayType(DateRangeDataTypeName); + mappings.AddStructArrayType>(DateRangeDataTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..b010ce58a6 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs @@ -0,0 +1,142 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +sealed partial class 
NodaTimeTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + protected static DataTypeName TimestampTzDataTypeName => new("pg_catalog.timestamptz"); + protected static DataTypeName TimestampDataTypeName => new("pg_catalog.timestamp"); + protected static DataTypeName DateDataTypeName => new("pg_catalog.date"); + protected static DataTypeName TimeDataTypeName => new("pg_catalog.time"); + protected static DataTypeName TimeTzDataTypeName => new("pg_catalog.timetz"); + protected static DataTypeName IntervalDataTypeName => new("pg_catalog.interval"); + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // timestamp and timestamptz, legacy and non-legacy modes + if (LegacyTimestampBehavior) + { + // timestamp is the default for writing an Instant. 
+ + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions))); + + // timestamptz + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LegacyTimestampTzZonedDateTimeConverter( + DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LegacyTimestampTzOffsetDateTimeConverter( + DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); + } + else + { + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + + // timestamptz + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions))); + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions))); + } + + // date + 
mappings.AddStructType(DateDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + + // time + mappings.AddStructType(TimeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new LocalTimeConverter()), isDefault: true); + + // timetz + mappings.AddStructType(TimeTzDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new OffsetTimeConverter()), isDefault: true); + + // interval + mappings.AddType(IntervalDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new PeriodConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(IntervalDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new DurationConverter())); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + if (LegacyTimestampBehavior) + { + // timestamp + mappings.AddStructArrayType(TimestampDataTypeName); + mappings.AddStructArrayType(TimestampDataTypeName); + + // timestamptz + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + } + else + { + // timestamp + mappings.AddStructArrayType(TimestampDataTypeName); + + // timestamptz + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + } + + // other + mappings.AddStructArrayType(DateDataTypeName); + mappings.AddStructArrayType(TimeDataTypeName); + mappings.AddStructArrayType(TimeTzDataTypeName); + mappings.AddArrayType(IntervalDataTypeName); + mappings.AddStructArrayType(IntervalDataTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs b/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs index ff37bdd196..1cf433759a 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs @@ -1,5 +1,6 @@ using System; using NodaTime; +using Npgsql.NodaTime.Properties; namespace Npgsql.NodaTime.Internal; @@ -7,17 +8,11 @@ static class NodaTimeUtils { #if DEBUG internal static bool LegacyTimestampBehavior; - internal static bool DisableDateTimeInfinityConversions; #else internal static readonly bool LegacyTimestampBehavior; - internal static readonly bool DisableDateTimeInfinityConversions; #endif - static NodaTimeUtils() - { - LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; - DisableDateTimeInfinityConversions = 
AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; - } + static NodaTimeUtils() => LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; static readonly Instant Instant2000 = Instant.FromUtc(2000, 1, 1, 0, 0, 0); static readonly Duration Plus292Years = Duration.FromDays(292 * 365); @@ -27,17 +22,36 @@ static NodaTimeUtils() /// Decodes a PostgreSQL timestamp/timestamptz into a NodaTime Instant. /// /// The number of microseconds from 2000-01-01T00:00:00. + /// Whether infinity date/time conversions are enabled. /// /// Unfortunately NodaTime doesn't have Duration.FromMicroseconds(), so we decompose into milliseconds and nanoseconds. /// - internal static Instant DecodeInstant(long value) - => Instant2000 + Duration.FromMilliseconds(value / 1000) + Duration.FromNanoseconds(value % 1000 * 1000); + internal static Instant DecodeInstant(long value, bool dateTimeInfinityConversions) + => value switch + { + long.MaxValue => dateTimeInfinityConversions + ? Instant.MaxValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + long.MinValue => dateTimeInfinityConversions + ? Instant.MinValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + _ => Instant2000 + Duration.FromMilliseconds(value / 1000) + Duration.FromNanoseconds(value % 1000 * 1000) + }; /// /// Encodes a NodaTime Instant to a PostgreSQL timestamp/timestamptz. /// - internal static long EncodeInstant(Instant instant) + internal static long EncodeInstant(Instant instant, bool dateTimeInfinityConversions) { + if (dateTimeInfinityConversions) + { + if (instant == Instant.MaxValue) + return long.MaxValue; + + if (instant == Instant.MinValue) + return long.MinValue; + } + // We need to write the number of microseconds from 2000-01-01T00:00:00. 
var since2000 = instant - Instant2000; @@ -46,4 +60,4 @@ internal static long EncodeInstant(Instant instant) ? since2000.ToInt64Nanoseconds() / 1000 : (long)(since2000.ToBigIntegerNanoseconds() / 1000); } -} \ No newline at end of file +} diff --git a/src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs b/src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs new file mode 100644 index 0000000000..7c5499c2f8 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs @@ -0,0 +1,23 @@ +using NodaTime; +using Npgsql.Internal; + +namespace Npgsql.NodaTime.Internal; + +sealed class OffsetTimeConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int)); + return format is DataFormat.Binary; + } + + // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. + protected override OffsetTime ReadCore(PgReader reader) + => new(LocalTime.FromTicksSinceMidnight(reader.ReadInt64() * 10), Offset.FromSeconds(-reader.ReadInt32())); + + protected override void WriteCore(PgWriter writer, OffsetTime value) + { + writer.WriteInt64(value.TickOfDay / 10); + writer.WriteInt32(-(int)(value.Offset.Ticks / NodaConstants.TicksPerSecond)); + } +} diff --git a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs new file mode 100644 index 0000000000..1d768109c4 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs @@ -0,0 +1,88 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.NodaTime.Properties; + +namespace Npgsql.NodaTime.Internal; + +sealed class PeriodConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int)); + return 
format is DataFormat.Binary; + } + + protected override Period ReadCore(PgReader reader) + { + var microsecondsInDay = reader.ReadInt64(); + var days = reader.ReadInt32(); + var totalMonths = reader.ReadInt32(); + + if (microsecondsInDay == long.MaxValue && days == int.MaxValue && totalMonths == int.MaxValue) + return dateTimeInfinityConversions + ? Period.MaxValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue); + if (microsecondsInDay == long.MinValue && days == int.MinValue && totalMonths == int.MinValue) + return dateTimeInfinityConversions + ? Period.MinValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue); + + // NodaTime will normalize most things (i.e. nanoseconds to milliseconds, seconds...) + // but it will not normalize months to years. + var months = totalMonths % 12; + var years = totalMonths / 12; + + return new PeriodBuilder + { + Nanoseconds = microsecondsInDay * 1000, + Days = days, + Months = months, + Years = years + }.Build().Normalize(); + } + + protected override void WriteCore(PgWriter writer, Period value) + { + if (dateTimeInfinityConversions) + { + if (value == Period.MaxValue) + { + writer.WriteInt64(long.MaxValue); // microseconds + writer.WriteInt32(int.MaxValue); // days + writer.WriteInt32(int.MaxValue); // months + return; + } + + if (value == Period.MinValue) + { + writer.WriteInt64(long.MinValue); // microseconds + writer.WriteInt32(int.MinValue); // days + writer.WriteInt32(int.MinValue); // months + return; + } + } + + // We have to normalize the value as otherwise we might get a value with 0 everything except for ticks, which we ignore + value = value.Normalize(); + + try + { + checked + { + // Note that the end result must be long + // see #3438 + var microsecondsInDay = + (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * NodaConstants.MillisecondsPerSecond + value.Milliseconds) * 1000 + + 
value.Nanoseconds / 1000; // Take the microseconds, discard the nanosecond remainder + writer.WriteInt64(microsecondsInDay); + writer.WriteInt32(value.Weeks * 7 + value.Days); // days + writer.WriteInt32(value.Years * 12 + value.Months); // months + } + } + catch (OverflowException ex) + { + throw new ArgumentException(NpgsqlNodaTimeStrings.CannotWritePeriodDueToOverflow, ex); + } + } +} diff --git a/src/Npgsql.NodaTime/Internal/TimeHandler.cs b/src/Npgsql.NodaTime/Internal/TimeHandler.cs deleted file mode 100644 index 5171745764..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimeHandler.cs +++ /dev/null @@ -1,53 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimeHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimeHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimeHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - readonly BclTimeHandler _bclHandler; - - internal TimeHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimeHandler(postgresType); - - // PostgreSQL time resolution == 1 microsecond == 10 ticks - public override LocalTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => LocalTime.FromTicksSinceMidnight(buf.ReadInt64() * 10); - - public override int ValidateAndGetLength(LocalTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(LocalTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value.TickOfDay / 10); - - TimeSpan INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(TimeSpan value, NpgsqlParameter? 
parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - -#if NET6_0_OR_GREATER - TimeOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - public int ValidateAndGetLength(TimeOnly value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - public void Write(TimeOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); -#endif -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimeTzHandler.cs b/src/Npgsql.NodaTime/Internal/TimeTzHandler.cs deleted file mode 100644 index d8ace650dc..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimeTzHandler.cs +++ /dev/null @@ -1,41 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimeTzHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimeTzHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimeTzHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimeTzHandler _bclHandler; - - internal TimeTzHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimeTzHandler(postgresType); - - // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. - public override OffsetTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new( - LocalTime.FromTicksSinceMidnight(buf.ReadInt64() * 10), - Offset.FromSeconds(-buf.ReadInt32())); - - public override int ValidateAndGetLength(OffsetTime value, NpgsqlParameter? parameter) => 12; - - public override void Write(OffsetTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteInt64(value.TickOfDay / 10); - buf.WriteInt32(-(int)(value.Offset.Ticks / NodaConstants.TicksPerSecond)); - } - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampConverters.cs b/src/Npgsql.NodaTime/Internal/TimestampConverters.cs new file mode 100644 index 0000000000..4ac841c80e --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/TimestampConverters.cs @@ -0,0 +1,86 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +sealed class InstantConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override Instant ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions); + + protected override void WriteCore(PgWriter writer, Instant value) + => writer.WriteInt64(EncodeInstant(value, dateTimeInfinityConversions)); +} + +sealed class ZonedDateTimeConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override ZonedDateTime 
ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions).InUtc(); + + protected override void WriteCore(PgWriter writer, ZonedDateTime value) + { + if (value.Zone != DateTimeZone.Utc && !LegacyTimestampBehavior) + { + throw new ArgumentException( + $"Cannot write ZonedDateTime with Zone={value.Zone} to PostgreSQL type 'timestamp with time zone', " + + "only UTC is supported. " + + "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); + } + + writer.WriteInt64(EncodeInstant(value.ToInstant(), dateTimeInfinityConversions)); + } +} + +sealed class OffsetDateTimeConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override OffsetDateTime ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions).WithOffset(Offset.Zero); + + protected override void WriteCore(PgWriter writer, OffsetDateTime value) + { + if (value.Offset != Offset.Zero && !LegacyTimestampBehavior) + { + throw new ArgumentException( + $"Cannot write OffsetDateTime with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', " + + "only offset 0 (UTC) is supported. 
" + + "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); + } + + writer.WriteInt64(EncodeInstant(value.ToInstant(), dateTimeInfinityConversions)); + } +} + +sealed class LocalDateTimeConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override LocalDateTime ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions).InUtc().LocalDateTime; + + protected override void WriteCore(PgWriter writer, LocalDateTime value) + => writer.WriteInt64(EncodeInstant(value.InUtc().ToInstant(), dateTimeInfinityConversions)); +} diff --git a/src/Npgsql.NodaTime/Internal/TimestampHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampHandler.cs deleted file mode 100644 index 15c254e3d0..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampHandler.cs +++ /dev/null @@ -1,88 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using BclTimestampHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampHandler; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimestampHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimestampHandler _bclHandler; - - internal TimestampHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimestampHandler(postgresType); - - #region Read - - public override LocalDateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => ReadLocalDateTime(buf); - - internal static LocalDateTime ReadLocalDateTime(NpgsqlReadBuffer buf) - => buf.ReadInt64() switch - { - long.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDateTime.MaxIsoValue, - long.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDateTime.MinIsoValue, - var value => DecodeInstant(value).InUtc().LocalDateTime - }; - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(LocalDateTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(LocalDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => WriteLocalDateTime(value, buf); - - internal static void WriteLocalDateTime(LocalDateTime value, NpgsqlWriteBuffer buf) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == LocalDateTime.MaxIsoValue) - { - buf.WriteInt64(long.MaxValue); - return; - } - - if (value == LocalDateTime.MinIsoValue) - { - buf.WriteInt64(long.MinValue); - return; - } - } - - buf.WriteInt64(EncodeInstant(value.InUtc().ToInstant())); - } - - public int ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs deleted file mode 100644 index fa1924656a..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs +++ /dev/null @@ -1,126 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using BclTimestampTzHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampTzHandler; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimestampTzHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimestampTzHandler _bclHandler; - - public TimestampTzHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimestampTzHandler(postgresType); - - #region Read - - public override Instant Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => ReadInstant(buf); - - internal static Instant ReadInstant(NpgsqlReadBuffer buf) - => buf.ReadInt64() switch - { - long.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : Instant.MaxValue, - long.MinValue => DisableDateTimeInfinityConversions - ? 
throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : Instant.MinValue, - var value => DecodeInstant(value) - }; - - ZonedDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription).InUtc(); - - OffsetDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription).WithOffset(Offset.Zero); - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(Instant value, NpgsqlParameter? parameter) - => 8; - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(ZonedDateTime value, NpgsqlParameter? parameter) - => value.Zone == DateTimeZone.Utc || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write ZonedDateTime with Zone={value.Zone} to PostgreSQL type 'timestamp with time zone', " + - "only UTC is supported. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - public int ValidateAndGetLength(OffsetDateTime value, NpgsqlParameter? parameter) - => value.Offset == Offset.Zero || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write OffsetDateTime with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', " + - "only offset 0 (UTC) is supported. 
" + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - public override void Write(Instant value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => WriteInstant(value, buf); - - internal static void WriteInstant(Instant value, NpgsqlWriteBuffer buf) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == Instant.MaxValue) - { - buf.WriteInt64(long.MaxValue); - return; - } - - if (value == Instant.MinValue) - { - buf.WriteInt64(long.MinValue); - return; - } - } - - buf.WriteInt64(EncodeInstant(value)); - } - - void INpgsqlSimpleTypeHandler.Write(ZonedDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => Write(value.ToInstant(), buf, parameter); - - public void Write(OffsetDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => Write(value.ToInstant(), buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs deleted file mode 100644 index b752d0e820..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs +++ /dev/null @@ -1,203 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NodaTime.Internal; - -public partial class TimestampTzMultirangeHandler : MultirangeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>> -{ - readonly INpgsqlTypeHandler _intervalHandler; - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Interval[]); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(Interval[]); - - public TimestampTzMultirangeHandler(PostgresMultirangeType pgMultirangeType, TimestampTzRangeHandler rangeHandler) - : base(pgMultirangeType, rangeHandler) - => _intervalHandler = rangeHandler; - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? 
fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new Interval[numRanges]; - - for (var i = 0; i < multirange.Length; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange[i] = await _intervalHandler.Read(buf, rangeLen, async, fieldDescription); - } - - return multirange; - } - - async ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new List(numRanges); - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange.Add(await _intervalHandler.Read(buf, rangeLen, async, fieldDescription)); - } - - return multirange; - } - - public int ValidateAndGetLength(List value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - public int ValidateAndGetLength(Interval[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - int ValidateAndGetLengthCore(IList value, ref NpgsqlLengthCache? lengthCache) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var sum = 4 + 4 * value.Count; - for (var i = 0; i < value.Count; i++) - sum += _intervalHandler.ValidateAndGetLength(value[i], ref lengthCache, parameter: null); - - return lengthCache!.Set(sum); - } - - public async Task Write(Interval[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Length); - - for (var i = 0; i < value.Length; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - public async Task Write(List value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Count); - - for (var i = 0; i < value.Count; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - #region Boilerplate - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion Boilerplate -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs deleted file mode 100644 index b4998fc0d9..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs +++ /dev/null @@ -1,106 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NodaTime.Internal; - -public partial class TimestampTzRangeHandler : RangeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler>, - INpgsqlTypeHandler>, INpgsqlTypeHandler> -{ - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Interval); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(Interval); - - public TimestampTzRangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType, subtypeHandler) - { - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - // internal Interval ConvertRangetoInterval(NpgsqlRange range) - async ValueTask INpgsqlTypeHandler.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - var range = await Read(buf, len, async, fieldDescription); - - // NodaTime Interval includes the start instant and excludes the end instant. - Instant? start = range.LowerBoundInfinite - ? null - : range.LowerBoundIsInclusive - ? range.LowerBound - : range.LowerBound + Duration.Epsilon; - Instant? end = range.UpperBoundInfinite - ? null - : range.UpperBoundIsInclusive - ? range.UpperBound + Duration.Epsilon - : range.UpperBound; - return new(start, end); - } - - public int ValidateAndGetLength(Interval value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange( - new NpgsqlRange(value.HasStart ? value.Start : default, true, !value.HasStart, value.HasEnd ? value.End : default, false, !value.HasEnd), ref lengthCache, parameter); - - public Task Write(Interval value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(new NpgsqlRange(value.HasStart ? value.Start : default, true, !value.HasStart, value.HasEnd ? value.End : default, false, !value.HasEnd), - buf, lengthCache, parameter, async, cancellationToken); - - #region Boilerplate - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion Boilerplate -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index f83f6382e3..20f1107e34 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -1,18 +1,15 @@ - + Shay Rojansky NodaTime plugin for Npgsql, allowing mapping of PostgreSQL date/time types to NodaTime types. npgsql;postgresql;postgres;nodatime;date;time;ado;ado;net;database;sql README.md - netstandard2.0;net6.0 - net7.0 + net10.0 + $(NoWarn);NPG9001 - - - diff --git a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs index 9fe67ec485..ba3a0225ef 100644 --- a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs +++ b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs @@ -1,4 +1,4 @@ -using Npgsql.NodaTime.Internal; +using Npgsql.NodaTime.Internal; using Npgsql.TypeMapping; // ReSharper disable once CheckNamespace @@ -9,13 +9,24 @@ namespace Npgsql; /// public static class NpgsqlNodaTimeExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up NodaTime mappings for the PostgreSQL date/time types. /// /// The type mapper to set up (global or connection-specific) public static INpgsqlTypeMapper UseNodaTime(this INpgsqlTypeMapper mapper) { - mapper.AddTypeResolverFactory(new NodaTimeTypeHandlerResolverFactory()); + mapper.AddTypeInfoResolverFactory(new NodaTimeTypeInfoResolverFactory()); return mapper; } -} \ No newline at end of file + + /// + /// Sets up NodaTime mappings for the PostgreSQL date/time types. 
+ /// + /// The type mapper to set up (global or connection-specific) + public static TMapper UseNodaTime(this TMapper mapper) where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new NodaTimeTypeInfoResolverFactory()); + return mapper; + } +} diff --git a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs index cf71b6d0b6..290b6c190e 100644 --- a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs @@ -1,10 +1,8 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif -[assembly: InternalsVisibleTo("Npgsql.NodaTime.Tests, PublicKey=" + +[assembly: InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + "2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + "8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs index e47b9140b5..02eb03e26d 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs @@ -1,4 +1,4 @@ -//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ // // This code was generated by a tool. // @@ -11,32 +11,46 @@ namespace Npgsql.NodaTime.Properties { using System; - [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [System.Diagnostics.DebuggerNonUserCodeAttribute()] - [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + /// + /// A strongly-typed resource class, for looking up localized strings, etc. 
+ /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlNodaTimeStrings { - private static System.Resources.ResourceManager resourceMan; + private static global::System.Resources.ResourceManager resourceMan; - private static System.Globalization.CultureInfo resourceCulture; + private static global::System.Globalization.CultureInfo resourceCulture; - [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlNodaTimeStrings() { } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Resources.ResourceManager ResourceManager { + /// + /// Returns the cached ResourceManager instance used by this class. 
+ /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { get { - if (object.Equals(null, resourceMan)) { - System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.NodaTime.Properties.NpgsqlNodaTimeStrings", typeof(NpgsqlNodaTimeStrings).Assembly); + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.NodaTime.Properties.NpgsqlNodaTimeStrings", typeof(NpgsqlNodaTimeStrings).Assembly); resourceMan = temp; } return resourceMan; } } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Globalization.CultureInfo Culture { + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -45,10 +59,31 @@ internal static System.Globalization.CultureInfo Culture { } } + /// + /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. + /// internal static string CannotReadInfinityValue { get { return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } + + /// + /// Looks up a localized string similar to Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead.. 
+ /// + internal static string CannotReadIntervalWithMonthsAsDuration { + get { + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsDuration", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Cannot write NodaTime's Period because it's out of range for the PG interval type.. + /// + internal static string CannotWritePeriodDueToOverflow { + get { + return ResourceManager.GetString("CannotWritePeriodDueToOverflow", resourceCulture); + } + } } } diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx index d05d0c3a62..f0090afb83 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx @@ -21,4 +21,10 @@ Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled. - \ No newline at end of file + + Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead. + + + Cannot write NodaTime's Period because it's out of range for the PG interval type. + + diff --git a/src/Npgsql.NodaTime/PublicAPI.Shipped.txt b/src/Npgsql.NodaTime/PublicAPI.Shipped.txt index 998522184e..3bdfb77065 100644 --- a/src/Npgsql.NodaTime/PublicAPI.Shipped.txt +++ b/src/Npgsql.NodaTime/PublicAPI.Shipped.txt @@ -1,3 +1,4 @@ -#nullable enable +#nullable enable Npgsql.NpgsqlNodaTimeExtensions static Npgsql.NpgsqlNodaTimeExtensions.UseNodaTime(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+static Npgsql.NpgsqlNodaTimeExtensions.UseNodaTime(this TMapper mapper) -> TMapper diff --git a/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt b/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt index ab058de62d..7dc5c58110 100644 --- a/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt @@ -1 +1 @@ -#nullable enable +#nullable enable diff --git a/src/Npgsql.NodaTime/README.md b/src/Npgsql.NodaTime/README.md index d9bb2e7634..d24070920b 100644 --- a/src/Npgsql.NodaTime/README.md +++ b/src/Npgsql.NodaTime/README.md @@ -2,15 +2,18 @@ Npgsql is the open source .NET data provider for PostgreSQL. It allows you to co This package is an Npgsql plugin which allows you to use the [NodaTime](https://nodatime.org) date/time library when interacting with PostgreSQL; this provides a better and safer API for dealing with date and time data. -To use the NodaTime plugin, simply add a dependency on this package and set it up at program startup: +To use the NodaTime plugin, add a dependency on this package and create a NpgsqlDataSource. Once this is done, you can use NodaTime types when interacting with PostgreSQL, just as you would use e.g. `DateTime`: ```csharp -NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); -``` +using Npgsql; -Once this is done, you can simply use NodaTime types when interacting with PostgreSQL, just as you would use e.g. 
`DateTime`: +var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString); + +dataSourceBuilder.UseNodaTime(); + +var dataSource = dataSourceBuilder.Build(); +var conn = await dataSource.OpenConnectionAsync(); -```csharp // Write NodaTime Instant to PostgreSQL "timestamp with time zone" (UTC) using (var cmd = new NpgsqlCommand(@"INSERT INTO mytable (my_timestamptz) VALUES (@p)", conn)) { diff --git a/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs b/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs new file mode 100644 index 0000000000..2b48f0b52c --- /dev/null +++ b/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs @@ -0,0 +1,19 @@ +using System; +using OpenTelemetry.Metrics; + +// ReSharper disable once CheckNamespace +namespace Npgsql; + +/// +/// Extension method for setting up Npgsql OpenTelemetry metrics. +/// +public static class MeterProviderBuilderExtensions +{ + /// + /// Subscribes to the Npgsql metrics reporter to enable OpenTelemetry metrics. + /// + public static MeterProviderBuilder AddNpgsqlInstrumentation( + this MeterProviderBuilder builder, + Action? 
options = null) + => builder.AddMeter("Npgsql"); +} diff --git a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj index 7f9fea3eea..18592f8a5f 100644 --- a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj +++ b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj @@ -2,8 +2,7 @@ Shay Rojansky - netstandard2.0 - net7.0 + net10.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;opentelemetry;tracing;diagnostics;instrumentation README.md diff --git a/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs b/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs index 1a340b1a15..f30dbdd96f 100644 --- a/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs +++ b/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs b/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs index 0c34138278..f4fabc920b 100644 --- a/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs +++ b/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; using OpenTelemetry.Trace; // ReSharper disable once CheckNamespace @@ -12,8 +12,6 @@ public static class TracerProviderBuilderExtensions /// /// Subscribes to the Npgsql activity source to enable OpenTelemetry tracing. /// - public static TracerProviderBuilder AddNpgsql( - this TracerProviderBuilder builder, - Action? 
options = null) + public static TracerProviderBuilder AddNpgsql(this TracerProviderBuilder builder) => builder.AddSource("Npgsql"); -} \ No newline at end of file +} diff --git a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj index e10f1cf3e2..4f5c1eb42d 100644 --- a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj +++ b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj @@ -2,8 +2,10 @@ netstandard2.0 + false 1591 - true + true + false @@ -26,7 +28,6 @@ - diff --git a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt index fae3d00e6c..9ad343124c 100644 --- a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt +++ b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt @@ -4,7 +4,7 @@ using System.Collections.Generic; #nullable disable #pragma warning disable CS1591 // Missing XML comment for publicly visible type or member #pragma warning disable RS0016 // Add public types and members to the declared API -#pragma warning disable 618 // Member is obsolete +#pragma warning disable CS0618 // Member is obsolete namespace Npgsql { @@ -13,18 +13,18 @@ namespace Npgsql private partial int Init() { // Set the strongly-typed properties to their default values - {{ + {{~ for p in properties if p.is_obsolete continue end if (p.default_value != null) - }} + ~}} {{ p.name }} = {{ p.default_value }}; - {{ + {{~ end - end }} + end ~}} // Setting the strongly-typed properties here also set the string-based properties in the base class. 
// Clear them (default settings = empty connection string) @@ -33,91 +33,59 @@ namespace Npgsql return 0; } - private partial int GeneratedSetter(string keyword, object value) + private partial bool GeneratedActions(GeneratedAction action, string keyword, ref object value) { switch (keyword) { - {{ for kv in properties_by_keyword }} + {{~ for kv in properties_by_keyword ~}} case "{{ kv.key }}": - {{ p = kv.value }} - {{ if p.is_enum }} + {{~ for alternative in kv.value.alternatives ~}} + case "{{ alternative }}": + {{~ end ~}} { - {{ p.name }} = value is string s - ? ({{ p.type_name }})Enum.Parse(typeof({{ p.type_name }}), s, ignoreCase: true) - : ({{ p.type_name }})Convert.ChangeType(value, typeof({{ p.type_name }})); - } - {{ else }} - {{ p.name }} = ({{ p.type_name }})Convert.ChangeType(value, typeof({{ p.type_name }})); - {{ end }} - break; - {{ end }} - - default: - throw new KeyNotFoundException(); - } - - return 0; - } - - private partial bool TryGetValueGenerated(string keyword, out object value) - { - switch (keyword) - { - {{ for kv in properties_by_keyword }} - case "{{ kv.key }}": - {{ p = kv.value }} - value = (object){{ p.name }} ?? ""; + {{~ p = kv.value ~}} + const string canonicalName = "{{ p.canonical_name }}"; + switch(action) + { + case GeneratedAction.Remove: + var removed = base.ContainsKey(canonicalName); + {{~ if p.default_value == null ~}} + {{ p.name }} = default; + {{~ else ~}} + {{ p.name }} = {{ p.default_value }}; + {{~ end ~}} + {{~ if p.type_name != "String" ~}} + base.Remove(canonicalName); + {{~ else ~}} + // String property setters call SetValue, which itself calls base.Remove(). + {{~ end ~}} + return removed; + case GeneratedAction.Set: + {{~ if p.is_enum ~}} + {{ p.name }} = ({{ p.type_name }})GetValue(typeof({{ p.type_name }}), value); + {{~ else ~}} + {{ p.name }} = ({{ p.type_name }})Convert.ChangeType(value, typeof({{ p.type_name }})); + {{~ end ~}} + break; + case GeneratedAction.Get: + value = (object){{ p.name }} ?? 
""; + break; + case GeneratedAction.GetCanonical: + value = canonicalName; + break; + } return true; - {{ end }} } - - value = null; - return false; - } - - private partial bool ContainsKeyGenerated(string keyword) - => keyword switch - { - {{ for kv in properties_by_keyword }} - "{{ kv.key }}" => true, - {{ end }} - - _ => false - }; - - private partial bool RemoveGenerated(string keyword) - { - switch (keyword) - { - {{ for kv in properties_by_keyword }} - case "{{ kv.key }}": - { - {{ p = kv.value }} - var removed = base.ContainsKey("{{ p.canonical_name }}"); - // Note that string property setters call SetValue, which itself calls base.Remove(). - {{ if p.default_value == null }} - {{ p.name }} = default; - {{ else }} - {{ p.name }} = {{ p.default_value }}; - {{ end }} - base.Remove("{{ p.canonical_name }}"); - return removed; - } - {{ end }} - - default: - throw new KeyNotFoundException(); + {{~ end ~}} } + if (action is GeneratedAction.Get or GeneratedAction.GetCanonical) + return false; + throw new KeyNotFoundException(); + + static object GetValue(Type type, object value) + => value is string s + ? 
Enum.Parse(type, s, ignoreCase: true) + : Convert.ChangeType(value, type); } - - private partial string ToCanonicalKeyword(string keyword) - => keyword switch - { - {{ for kv in properties_by_keyword }} - "{{ kv.key }}" => "{{ kv.value.canonical_name }}", - {{ end }} - - _ => throw new KeyNotFoundException() - }; } } diff --git a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs index f7008610b0..c7c7228321 100644 --- a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs +++ b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs @@ -9,7 +9,7 @@ namespace Npgsql.SourceGenerators; [Generator] -public class NpgsqlConnectionStringBuilderSourceGenerator : ISourceGenerator +public class NpgsqlConnectionStringBuilderSourceGenerator : IIncrementalGenerator { static readonly DiagnosticDescriptor InternalError = new DiagnosticDescriptor( id: "PGXXXX", @@ -19,93 +19,107 @@ public class NpgsqlConnectionStringBuilderSourceGenerator : ISourceGenerator DiagnosticSeverity.Error, isEnabledByDefault: true); - public void Initialize(GeneratorInitializationContext context) {} - - public void Execute(GeneratorExecutionContext context) + public void Initialize(IncrementalGeneratorInitializationContext context) { - if (context.Compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringBuilder") is not { } type) - return; - - if (context.Compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringPropertyAttribute") is not - { } connectionStringPropertyAttribute) - { - context.ReportDiagnostic(Diagnostic.Create( - InternalError, - location: null, - "Could not find Npgsql.NpgsqlConnectionStringPropertyAttribute")); - return; - } - - var obsoleteAttribute = context.Compilation.GetTypeByMetadataName("System.ObsoleteAttribute"); - var displayNameAttribute = 
context.Compilation.GetTypeByMetadataName("System.ComponentModel.DisplayNameAttribute"); - var defaultValueAttribute = context.Compilation.GetTypeByMetadataName("System.ComponentModel.DefaultValueAttribute"); - - if (obsoleteAttribute is null || displayNameAttribute is null || defaultValueAttribute is null) - { - context.ReportDiagnostic(Diagnostic.Create( - InternalError, - location: null, - "Could not find ObsoleteAttribute, DisplayNameAttribute or DefaultValueAttribute")); - return; - } - - var properties = new List(); - var propertiesByKeyword = new Dictionary(); - foreach (var member in type.GetMembers()) + var compilationProvider = context.CompilationProvider; + context.RegisterSourceOutput(compilationProvider, (spc, compilation) => { - if (member is not IPropertySymbol property || - property.GetAttributes().FirstOrDefault(a => connectionStringPropertyAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) is not { } propertyAttribute || - property.GetAttributes() - .FirstOrDefault(a => displayNameAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) - ?.ConstructorArguments[0].Value is not string displayName) + var type = compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringBuilder"); + if (type is null) + return; + + var connectionStringPropertyAttribute = compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringPropertyAttribute"); + if (connectionStringPropertyAttribute is null) { - continue; + spc.ReportDiagnostic(Diagnostic.Create( + InternalError, + location: null, + "Could not find Npgsql.NpgsqlConnectionStringPropertyAttribute")); + return; } - var explicitDefaultValue = property.GetAttributes() - .FirstOrDefault(a => defaultValueAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) - ?.ConstructorArguments[0].Value; - - if (explicitDefaultValue is string s) - explicitDefaultValue = '"' + s.Replace("\"", "\"\"") + '"'; + var obsoleteAttribute = 
compilation.GetTypeByMetadataName("System.ObsoleteAttribute"); + var displayNameAttribute = compilation.GetTypeByMetadataName("System.ComponentModel.DisplayNameAttribute"); + var defaultValueAttribute = compilation.GetTypeByMetadataName("System.ComponentModel.DefaultValueAttribute"); - if (explicitDefaultValue is not null && property.Type.TypeKind == TypeKind.Enum) + if (obsoleteAttribute is null || displayNameAttribute is null || defaultValueAttribute is null) { - explicitDefaultValue = $"({property.Type.Name}){explicitDefaultValue}"; - // var foo = property.Type.Name; - // explicitDefaultValue += $"/* {foo} */"; + spc.ReportDiagnostic(Diagnostic.Create( + InternalError, + location: null, + "Could not find ObsoleteAttribute, DisplayNameAttribute or DefaultValueAttribute")); + return; } - var propertyDetails = new PropertyDetails + var properties = new List(); + var propertiesByKeyword = new Dictionary(); + foreach (var member in type.GetMembers()) { - Name = property.Name, - CanonicalName = displayName, - TypeName = property.Type.Name, - IsEnum = property.Type.TypeKind == TypeKind.Enum, - IsObsolete = property.GetAttributes().Any(a => obsoleteAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)), - DefaultValue = explicitDefaultValue - }; - - properties.Add(propertyDetails); + if (member is not IPropertySymbol property || + property.GetAttributes().FirstOrDefault(a => connectionStringPropertyAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) is not { } propertyAttribute || + property.GetAttributes() + .FirstOrDefault(a => displayNameAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) + ?.ConstructorArguments[0].Value is not string displayName) + { + continue; + } + + var explicitDefaultValue = property.GetAttributes() + .FirstOrDefault(a => defaultValueAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) + ?.ConstructorArguments[0].Value; + + if (explicitDefaultValue is string s) + explicitDefaultValue 
= '"' + s.Replace("\"", "\"\"") + '"'; + + if (explicitDefaultValue is not null && property.Type.TypeKind == TypeKind.Enum) + { + explicitDefaultValue = $"({property.Type.Name}){explicitDefaultValue}"; + } + + var propertyDetails = new PropertyDetails + { + Name = property.Name, + CanonicalName = displayName, + TypeName = property.Type.Name, + IsEnum = property.Type.TypeKind == TypeKind.Enum, + IsObsolete = property.GetAttributes().Any(a => obsoleteAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)), + DefaultValue = explicitDefaultValue + }; + + properties.Add(propertyDetails); + + propertiesByKeyword[displayName.ToUpperInvariant()] = propertyDetails; + if (property.Name != displayName) + { + var propertyName = property.Name.ToUpperInvariant(); + if (!propertiesByKeyword.ContainsKey(propertyName)) + propertyDetails.Alternatives.Add(propertyName); + } + + if (propertyAttribute.ConstructorArguments.Length == 1) + { + foreach (var synonymArg in propertyAttribute.ConstructorArguments[0].Values) + { + if (synonymArg.Value is string synonym) + { + var synonymName = synonym.ToUpperInvariant(); + if (!propertiesByKeyword.ContainsKey(synonymName)) + propertyDetails.Alternatives.Add(synonymName); + } + } + } + } - propertiesByKeyword[displayName.ToUpperInvariant()] = propertyDetails; - if (property.Name != displayName) - propertiesByKeyword[property.Name.ToUpperInvariant()] = propertyDetails; - if (propertyAttribute.ConstructorArguments.Length == 1) - foreach (var synonymArg in propertyAttribute.ConstructorArguments[0].Values) - if (synonymArg.Value is string synonym) - propertiesByKeyword[synonym.ToUpperInvariant()] = propertyDetails; - } + var template = Template.Parse(EmbeddedResource.GetContent("NpgsqlConnectionStringBuilder.snbtxt"), "NpgsqlConnectionStringBuilder.snbtxt"); - var template = Template.Parse(EmbeddedResource.GetContent("NpgsqlConnectionStringBuilder.snbtxt"), "NpgsqlConnectionStringBuilder.snbtxt"); + var output = template.Render(new + { + 
Properties = properties, + PropertiesByKeyword = propertiesByKeyword + }); - var output = template.Render(new - { - Properties = properties, - PropertiesByKeyword = propertiesByKeyword + spc.AddSource(type.Name + ".Generated.cs", SourceText.From(output, Encoding.UTF8)); }); - - context.AddSource(type.Name + ".Generated.cs", SourceText.From(output, Encoding.UTF8)); } sealed class PropertyDetails @@ -116,5 +130,18 @@ sealed class PropertyDetails public bool IsEnum { get; set; } public bool IsObsolete { get; set; } public object? DefaultValue { get; set; } + + public HashSet Alternatives { get; } = new(StringComparer.Ordinal); + + public PropertyDetails Clone() + => new() + { + Name = Name, + CanonicalName = CanonicalName, + TypeName = TypeName, + IsEnum = IsEnum, + IsObsolete = IsObsolete, + DefaultValue = DefaultValue + }; } -} \ No newline at end of file +} diff --git a/src/Npgsql.SourceGenerators/TypeHandler.snbtxt b/src/Npgsql.SourceGenerators/TypeHandler.snbtxt deleted file mode 100644 index 041c948881..0000000000 --- a/src/Npgsql.SourceGenerators/TypeHandler.snbtxt +++ /dev/null @@ -1,36 +0,0 @@ -{{ for using in usings }} -using {{ using }}; -{{ end }} - -#nullable enable -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member -#pragma warning disable RS0016 // Add public types and members to the declared API -#pragma warning disable 618 // Member is obsolete - -namespace {{ namespace }} -{ - partial class {{ type_name }} - { - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - {{ for interface in interfaces }} - {{ interface.handled_type }} converted => (({{ interface.name }})this).ValidateAndGetLength(converted, {{ is_simple ? 
"" : "ref lengthCache, " }}parameter), - {{ end }} - - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {{ type_name }}") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - {{ for interface in interfaces }} - {{ interface.handled_type }} converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - {{ end }} - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {{ type_name }}") - }; - } -} diff --git a/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs b/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs deleted file mode 100644 index 75edca250a..0000000000 --- a/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs +++ /dev/null @@ -1,129 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using Microsoft.CodeAnalysis; -using Microsoft.CodeAnalysis.CSharp; -using Microsoft.CodeAnalysis.CSharp.Syntax; -using Microsoft.CodeAnalysis.Text; -using Scriban; - -namespace Npgsql.SourceGenerators; - -[Generator] -sealed class TypeHandlerSourceGenerator : ISourceGenerator -{ - public void Initialize(GeneratorInitializationContext context) - => context.RegisterForSyntaxNotifications(() => new MySyntaxReceiver()); - - public void Execute(GeneratorExecutionContext context) - { - var compilation = context.Compilation; - - var (simpleTypeHandlerInterfaceSymbol, typeHandlerInterfaceSymbol) = ( - compilation.GetTypeByMetadataName("Npgsql.Internal.TypeHandling.INpgsqlSimpleTypeHandler`1"), - 
compilation.GetTypeByMetadataName("Npgsql.Internal.TypeHandling.INpgsqlTypeHandler`1")); - - if (simpleTypeHandlerInterfaceSymbol is null || typeHandlerInterfaceSymbol is null) - throw new Exception("Could not find INpgsqlSimpleTypeHandler or INpgsqlTypeHandler"); - - var template = Template.Parse(EmbeddedResource.GetContent("TypeHandler.snbtxt"), "TypeHandler.snbtxt"); - - foreach (var cds in ((MySyntaxReceiver)context.SyntaxReceiver!).TypeHandlerCandidates) - { - var semanticModel = compilation.GetSemanticModel(cds.SyntaxTree); - if (semanticModel.GetDeclaredSymbol(cds) is not INamedTypeSymbol typeSymbol) - continue; - - if (typeSymbol.AllInterfaces.Any(i => - i.OriginalDefinition.Equals(simpleTypeHandlerInterfaceSymbol, SymbolEqualityComparer.Default))) - { - AugmentTypeHandler(template, typeSymbol, cds, isSimple: true); - continue; - } - - if (typeSymbol.AllInterfaces.Any(i => - i.OriginalDefinition.Equals(typeHandlerInterfaceSymbol, SymbolEqualityComparer.Default))) - { - AugmentTypeHandler(template, typeSymbol, cds, isSimple: false); - } - } - - void AugmentTypeHandler( - Template template, - INamedTypeSymbol typeSymbol, - ClassDeclarationSyntax classDeclarationSyntax, - bool isSimple) - { - var usings = new HashSet( - new[] - { - "System", - "System.Threading", - "System.Threading.Tasks", - "Npgsql.Internal" - }.Concat(classDeclarationSyntax.SyntaxTree.GetCompilationUnitRoot().Usings - .Where(u => u.Alias is null && u.StaticKeyword.IsKind(SyntaxKind.None)) - .Select(u => u.Name.ToString()))); - - var interfaces = typeSymbol.AllInterfaces - .Where(i => i.OriginalDefinition.Equals(isSimple ? 
simpleTypeHandlerInterfaceSymbol : typeHandlerInterfaceSymbol, - SymbolEqualityComparer.Default)) - // Hacky: we want to emit switch arms for abstract types after concrete ones, since otherwise the compiled complains about - // unreachable arms - .OrderBy(i => i.TypeArguments[0].IsAbstract); - - var output = template.Render(new - { - Usings = usings, - TypeName = FormatTypeName(typeSymbol), - Namespace = typeSymbol.ContainingNamespace.ToDisplayString(), - IsSimple = isSimple, - Interfaces = interfaces.Select(i => new - { - Name = FormatTypeName(i), - HandledType = FormatTypeName(i.TypeArguments[0]), - }) - }); - - context.AddSource(typeSymbol.Name + ".Generated.cs", SourceText.From(output, Encoding.UTF8)); - } - - static string FormatTypeName(ITypeSymbol typeSymbol) - { - if (typeSymbol is INamedTypeSymbol namedTypeSymbol) - { - return namedTypeSymbol.IsGenericType - ? new StringBuilder(namedTypeSymbol.Name) - .Append('<') - .Append(string.Join(",", namedTypeSymbol.TypeArguments.Select(FormatTypeName))) - .Append('>') - .ToString() - : namedTypeSymbol.Name; - } - - if (typeSymbol.TypeKind == TypeKind.Array) - { - return $"{FormatTypeName(((IArrayTypeSymbol)typeSymbol).ElementType)}[]"; - // return "int"; - } - - return typeSymbol.ToString(); - } - } - - sealed class MySyntaxReceiver : ISyntaxReceiver - { - public List TypeHandlerCandidates { get; } = new(); - - public void OnVisitSyntaxNode(SyntaxNode syntaxNode) - { - if (syntaxNode is ClassDeclarationSyntax cds && - cds.BaseList is not null && - cds.Modifiers.Any(SyntaxKind.PartialKeyword)) - { - TypeHandlerCandidates.Add(cds); - } - } - } -} \ No newline at end of file diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index 415f2a8577..fe8e6edf5b 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -1,8 +1,7 @@ -using System; +using System; using 
System.Collections.Generic; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Util; namespace Npgsql.BackendMessages; @@ -14,23 +13,15 @@ abstract class AuthenticationRequestMessage : IBackendMessage sealed class AuthenticationOkMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationOk; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.Ok; internal static readonly AuthenticationOkMessage Instance = new(); AuthenticationOkMessage() { } } -sealed class AuthenticationKerberosV5Message : AuthenticationRequestMessage -{ - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationKerberosV5; - - internal static readonly AuthenticationKerberosV5Message Instance = new(); - AuthenticationKerberosV5Message() { } -} - sealed class AuthenticationCleartextPasswordMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationCleartextPassword; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.CleartextPassword; internal static readonly AuthenticationCleartextPasswordMessage Instance = new(); AuthenticationCleartextPasswordMessage() { } @@ -38,9 +29,9 @@ sealed class AuthenticationCleartextPasswordMessage : AuthenticationRequestMess sealed class AuthenticationMD5PasswordMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationMD5Password; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.MD5Password; - internal byte[] Salt { get; private set; } + internal byte[] Salt { get; } internal static AuthenticationMD5PasswordMessage Load(NpgsqlReadBuffer buf) { @@ -50,22 +41,12 @@ internal static AuthenticationMD5PasswordMessage 
Load(NpgsqlReadBuffer buf) } AuthenticationMD5PasswordMessage(byte[] salt) - { - Salt = salt; - } -} - -sealed class AuthenticationSCMCredentialMessage : AuthenticationRequestMessage -{ - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSCMCredential; - - internal static readonly AuthenticationSCMCredentialMessage Instance = new(); - AuthenticationSCMCredentialMessage() { } + => Salt = salt; } sealed class AuthenticationGSSMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationGSS; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.GSS; internal static readonly AuthenticationGSSMessage Instance = new(); AuthenticationGSSMessage() { } @@ -73,9 +54,9 @@ sealed class AuthenticationGSSMessage : AuthenticationRequestMessage sealed class AuthenticationGSSContinueMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationGSSContinue; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.GSSContinue; - internal byte[] AuthenticationData { get; private set; } + internal byte[] AuthenticationData { get; } internal static AuthenticationGSSContinueMessage Load(NpgsqlReadBuffer buf, int len) { @@ -86,14 +67,12 @@ internal static AuthenticationGSSContinueMessage Load(NpgsqlReadBuffer buf, int } AuthenticationGSSContinueMessage(byte[] authenticationData) - { - AuthenticationData = authenticationData; - } + => AuthenticationData = authenticationData; } sealed class AuthenticationSSPIMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSSPI; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SSPI; internal static readonly AuthenticationSSPIMessage 
Instance = new(); AuthenticationSSPIMessage() { } @@ -103,8 +82,8 @@ sealed class AuthenticationSSPIMessage : AuthenticationRequestMessage sealed class AuthenticationSASLMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSASL; - internal List Mechanisms { get; } = new(); + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASL; + internal List Mechanisms { get; } = []; internal AuthenticationSASLMessage(NpgsqlReadBuffer buf) { @@ -118,7 +97,7 @@ internal AuthenticationSASLMessage(NpgsqlReadBuffer buf) sealed class AuthenticationSASLContinueMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSASLContinue; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASLContinue; internal byte[] Payload { get; } internal AuthenticationSASLContinueMessage(NpgsqlReadBuffer buf, int len) @@ -136,7 +115,7 @@ sealed class AuthenticationSCRAMServerFirstMessage internal static AuthenticationSCRAMServerFirstMessage Load(byte[] bytes, ILogger connectionLogger) { - var data = PGUtil.UTF8Encoding.GetString(bytes); + var data = NpgsqlWriteBuffer.UTF8Encoding.GetString(bytes); string? 
nonce = null, salt = null; var iteration = -1; @@ -172,7 +151,7 @@ internal static AuthenticationSCRAMServerFirstMessage Load(byte[] bytes, ILogger sealed class AuthenticationSASLFinalMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSASLFinal; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASLFinal; internal byte[] Payload { get; } internal AuthenticationSASLFinalMessage(NpgsqlReadBuffer buf, int len) @@ -188,7 +167,7 @@ sealed class AuthenticationSCRAMServerFinalMessage internal static AuthenticationSCRAMServerFinalMessage Load(byte[] bytes, ILogger connectionLogger) { - var data = PGUtil.UTF8Encoding.GetString(bytes); + var data = NpgsqlWriteBuffer.UTF8Encoding.GetString(bytes); string? serverSignature = null; foreach (var part in data.Split(',')) @@ -211,20 +190,15 @@ internal AuthenticationSCRAMServerFinalMessage(string serverSignature) #endregion SASL -// TODO: Remove Authentication prefix from everything enum AuthenticationRequestType { - AuthenticationOk = 0, - AuthenticationKerberosV4 = 1, - AuthenticationKerberosV5 = 2, - AuthenticationCleartextPassword = 3, - AuthenticationCryptPassword = 4, - AuthenticationMD5Password = 5, - AuthenticationSCMCredential = 6, - AuthenticationGSS = 7, - AuthenticationGSSContinue = 8, - AuthenticationSSPI = 9, - AuthenticationSASL = 10, - AuthenticationSASLContinue = 11, - AuthenticationSASLFinal = 12 + Ok = 0, + CleartextPassword = 3, + MD5Password = 5, + GSS = 7, + GSSContinue = 8, + SSPI = 9, + SASL = 10, + SASLContinue = 11, + SASLFinal = 12 } diff --git a/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs b/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs index cf6506b02f..cf72e619b2 100644 --- a/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs +++ b/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; namespace 
Npgsql.BackendMessages; @@ -6,8 +6,8 @@ sealed class BackendKeyDataMessage : IBackendMessage { public BackendMessageCode Code => BackendMessageCode.BackendKeyData; - internal int BackendProcessId { get; private set; } - internal int BackendSecretKey { get; private set; } + internal int BackendProcessId { get; } + internal int BackendSecretKey { get; } internal BackendKeyDataMessage(NpgsqlReadBuffer buf) { diff --git a/src/Npgsql/BackendMessages/BindCompleteMessage.cs b/src/Npgsql/BackendMessages/BindCompleteMessage.cs index f6dbfce1bb..d4a9f2b2d6 100644 --- a/src/Npgsql/BackendMessages/BindCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/BindCompleteMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class BindCompleteMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/CloseCompletedMessage.cs b/src/Npgsql/BackendMessages/CloseCompletedMessage.cs index 9443fd3e97..522b3c46e3 100644 --- a/src/Npgsql/BackendMessages/CloseCompletedMessage.cs +++ b/src/Npgsql/BackendMessages/CloseCompletedMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class CloseCompletedMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs index 6d9800a27f..91bc43fff1 100644 --- a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs @@ -1,122 +1,61 @@ -using System.Diagnostics; +using System; +using System.Buffers.Text; using Npgsql.Internal; namespace Npgsql.BackendMessages; sealed class CommandCompleteMessage : IBackendMessage { + uint _oid; + ulong _rows; internal StatementType StatementType { get; private set; } - internal uint OID { get; private set; } - internal ulong Rows { get; private set; } + + internal uint OID => _oid; + internal ulong Rows => _rows; internal CommandCompleteMessage Load(NpgsqlReadBuffer buf, int 
len) { - Rows = 0; - OID = 0; - - var bytes = buf.Buffer; - var i = buf.ReadPosition; + var bytes = buf.Span.Slice(0, len); buf.Skip(len); - switch (bytes[i]) - { - case (byte)'I': - if (!AreEqual(bytes, i, "INSERT ")) - goto default; - StatementType = StatementType.Insert; - i += 7; - OID = (uint) ParseNumber(bytes, ref i); - i++; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'D': - if (!AreEqual(bytes, i, "DELETE ")) - goto default; - StatementType = StatementType.Delete; - i += 7; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'U': - if (!AreEqual(bytes, i, "UPDATE ")) - goto default; - StatementType = StatementType.Update; - i += 7; - Rows = ParseNumber(bytes, ref i); - return this; - case (byte)'S': - if (!AreEqual(bytes, i, "SELECT ")) - goto default; - StatementType = StatementType.Select; - i += 7; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'M': - if (AreEqual(bytes, i, "MERGE ")) - { - StatementType = StatementType.Merge; - i += 6; - } - else if (AreEqual(bytes, i, "MOVE ")) - { - StatementType = StatementType.Move; - i += 5; - } - else - goto default; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'F': - if (!AreEqual(bytes, i, "FETCH ")) - goto default; - StatementType = StatementType.Fetch; - i += 6; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'C': - if (AreEqual(bytes, i, "COPY ")) - { - StatementType = StatementType.Copy; - i += 5; - Rows = ParseNumber(bytes, ref i); - return this; - } - if (bytes[i + 4] == 0 && AreEqual(bytes, i, "CALL")) - { - StatementType = StatementType.Call; - return this; - } + // PostgreSQL always writes these strings as ASCII, see https://github.com/postgres/postgres/blob/c8e1ba736b2b9e8c98d37a5b77c4ed31baf94147/src/backend/tcop/cmdtag.c#L130-L133 + (StatementType, var argumentsStart) = Convert.ToChar(bytes[0]) switch + { + 'S' when bytes.StartsWith("SELECT "u8) => (StatementType.Select, "SELECT ".Length), + 'I' when 
bytes.StartsWith("INSERT "u8) => (StatementType.Insert, "INSERT ".Length), + 'U' when bytes.StartsWith("UPDATE "u8) => (StatementType.Update, "UPDATE ".Length), + 'D' when bytes.StartsWith("DELETE "u8) => (StatementType.Delete, "DELETE ".Length), + 'M' when bytes.StartsWith("MERGE "u8) => (StatementType.Merge, "MERGE ".Length), + 'C' when bytes.StartsWith("COPY "u8) => (StatementType.Copy, "COPY ".Length), + 'C' when bytes.StartsWith("CALL"u8) => (StatementType.Call, "CALL".Length), + 'M' when bytes.StartsWith("MOVE "u8) => (StatementType.Move, "MOVE ".Length), + 'F' when bytes.StartsWith("FETCH "u8) => (StatementType.Fetch, "FETCH ".Length), + 'C' when bytes.StartsWith("CREATE TABLE AS "u8) => (StatementType.CreateTableAs, "CREATE TABLE AS ".Length), + _ => (StatementType.Other, 0) + }; + + _oid = 0; + _rows = 0; + + // Slice away the null terminator. + var arguments = bytes.Slice(argumentsStart, bytes.Length - argumentsStart - 1); + switch (StatementType) + { + case StatementType.Other: + case StatementType.Call: + break; + case StatementType.Insert: + if (!Utf8Parser.TryParse(arguments, out _oid, out var nextArgumentOffset)) + throw new InvalidOperationException("Invalid bytes in command complete message."); + arguments = arguments.Slice(nextArgumentOffset + 1); goto default; - default: - StatementType = StatementType.Other; - return this; + if (!Utf8Parser.TryParse(arguments, out _rows, out _)) + throw new InvalidOperationException("Invalid bytes in command complete message."); + break; } - } - static bool AreEqual(byte[] bytes, int pos, string s) - { - for (var i = 0; i < s.Length; i++) - { - if (bytes[pos+i] != s[i]) - return false; - } - return true; - } - - static ulong ParseNumber(byte[] bytes, ref int pos) - { - Debug.Assert(bytes[pos] >= '0' && bytes[pos] <= '9'); - uint result = 0; - do - { - result = result * 10 + bytes[pos++] - '0'; - } while (bytes[pos] >= '0' && bytes[pos] <= '9'); - return result; + return this; } public BackendMessageCode Code => 
BackendMessageCode.CommandComplete; diff --git a/src/Npgsql/BackendMessages/CopyMessages.cs b/src/Npgsql/BackendMessages/CopyMessages.cs index 67ee5da526..84dd271617 100644 --- a/src/Npgsql/BackendMessages/CopyMessages.cs +++ b/src/Npgsql/BackendMessages/CopyMessages.cs @@ -1,7 +1,6 @@ -using System; +using System; using System.Collections.Generic; using Npgsql.Internal; -using Npgsql.Util; namespace Npgsql.BackendMessages; @@ -11,12 +10,10 @@ abstract class CopyResponseMessageBase : IBackendMessage internal bool IsBinary { get; private set; } internal short NumColumns { get; private set; } - internal List ColumnFormatCodes { get; } + internal List ColumnFormatCodes { get; } internal CopyResponseMessageBase() - { - ColumnFormatCodes = new List(); - } + => ColumnFormatCodes = []; internal void Load(NpgsqlReadBuffer buf) { @@ -32,7 +29,7 @@ internal void Load(NpgsqlReadBuffer buf) NumColumns = buf.ReadInt16(); for (var i = 0; i < NumColumns; i++) - ColumnFormatCodes.Add((FormatCode)buf.ReadInt16()); + ColumnFormatCodes.Add(DataFormatUtils.Create(buf.ReadInt16())); } } @@ -91,4 +88,4 @@ sealed class CopyDoneMessage : IBackendMessage public BackendMessageCode Code => BackendMessageCode.CopyDone; internal static readonly CopyDoneMessage Instance = new(); CopyDoneMessage() { } -} \ No newline at end of file +} diff --git a/src/Npgsql/BackendMessages/DataRowMessage.cs b/src/Npgsql/BackendMessages/DataRowMessage.cs index b4fddf9789..b4b4b40279 100644 --- a/src/Npgsql/BackendMessages/DataRowMessage.cs +++ b/src/Npgsql/BackendMessages/DataRowMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; /// /// DataRow is special in that it does not parse the actual contents of the backend message, diff --git a/src/Npgsql/BackendMessages/EmptyQueryMessage.cs b/src/Npgsql/BackendMessages/EmptyQueryMessage.cs index ef190f3678..d9f57d5189 100644 --- a/src/Npgsql/BackendMessages/EmptyQueryMessage.cs +++ 
b/src/Npgsql/BackendMessages/EmptyQueryMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class EmptyQueryMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs b/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs index 8a22139a94..f9cf4c8575 100644 --- a/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs +++ b/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using Microsoft.Extensions.Logging; using Npgsql.Internal; diff --git a/src/Npgsql/BackendMessages/NoDataMessage.cs b/src/Npgsql/BackendMessages/NoDataMessage.cs index 884d5c4d5e..9ff7176cbe 100644 --- a/src/Npgsql/BackendMessages/NoDataMessage.cs +++ b/src/Npgsql/BackendMessages/NoDataMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class NoDataMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs index ebda485331..3e98ab96f3 100644 --- a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using Npgsql.Internal; namespace Npgsql.BackendMessages; @@ -9,9 +9,7 @@ sealed class ParameterDescriptionMessage : IBackendMessage internal List TypeOIDs { get; } internal ParameterDescriptionMessage() - { - TypeOIDs = new List(); - } + => TypeOIDs = []; internal ParameterDescriptionMessage Load(NpgsqlReadBuffer buf) { @@ -23,4 +21,4 @@ internal ParameterDescriptionMessage Load(NpgsqlReadBuffer buf) } public BackendMessageCode Code => BackendMessageCode.ParameterDescription; -} \ No newline at end of file +} diff --git a/src/Npgsql/BackendMessages/ParseCompleteMessage.cs b/src/Npgsql/BackendMessages/ParseCompleteMessage.cs index bb011f821a..406bd9e194 100644 --- 
a/src/Npgsql/BackendMessages/ParseCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/ParseCompleteMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class ParseCompleteMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs b/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs index 5da91ea831..96663eaa45 100644 --- a/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs +++ b/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class PortalSuspendedMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs b/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs index 4d7225c422..64c9219342 100644 --- a/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs +++ b/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; namespace Npgsql.BackendMessages; diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index e50f5af795..a453fdbbf0 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -1,32 +1,46 @@ -using System; -using System.Collections; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; +using System.Runtime.CompilerServices; +using System.Threading; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Replication.PgOutput.Messages; -using Npgsql.TypeMapping; -using Npgsql.Util; namespace Npgsql.BackendMessages; +readonly struct ReadConversionContext(PgConcreteTypeInfo typeInfo, PgFieldBinding binding) +{ + public bool IsDefault => TypeInfo is null; + public PgConcreteTypeInfo TypeInfo { get; } 
= typeInfo; + public PgFieldBinding Binding { get; } = binding; +} + /// /// A RowDescription message sent from the backend. /// /// /// See https://www.postgresql.org/docs/current/static/protocol-message-formats.html /// -sealed class RowDescriptionMessage : IBackendMessage, IReadOnlyList +sealed class RowDescriptionMessage : IBackendMessage { + // We should really have CompareOptions.IgnoreKanaType here, but see + // https://github.com/dotnet/corefx/issues/12518#issuecomment-389658716 + static readonly StringComparer InvariantIgnoreCaseAndKanaWidthComparer = + CultureInfo.InvariantCulture.CompareInfo.GetStringComparer( + CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType); + + readonly bool _connectorOwned; FieldDescription?[] _fields; readonly Dictionary _nameIndex; Dictionary? _insensitiveIndex; + ReadConversionContext[]? _lastConverterInfoCache; - internal RowDescriptionMessage(int numFields = 10) + internal RowDescriptionMessage(bool connectorOwned, int numFields = 10) { + _connectorOwned = connectorOwned; _fields = new FieldDescription[numFields]; _nameIndex = new Dictionary(); } @@ -39,10 +53,10 @@ internal RowDescriptionMessage(int numFields = 10) _fields[i] = source._fields[i]!.Clone(); _nameIndex = new Dictionary(source._nameIndex); if (source._insensitiveIndex?.Count > 0) - _insensitiveIndex = new Dictionary(source._insensitiveIndex); + _insensitiveIndex = new Dictionary(source._insensitiveIndex, InvariantIgnoreCaseAndKanaWidthComparer); } - internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, TypeMapper typeMapper) + internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, PgSerializerOptions options) { _nameIndex.Clear(); _insensitiveIndex?.Clear(); @@ -60,14 +74,14 @@ internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, TypeMapper typeMapper) var field = _fields[i] ??= new(); field.Populate( - typeMapper, + options, name: buf.ReadNullTerminatedString(), tableOID: buf.ReadUInt32(), columnAttributeNumber: 
buf.ReadInt16(), oid: buf.ReadUInt32(), typeSize: buf.ReadInt16(), typeModifier: buf.ReadInt32(), - formatCode: (FormatCode)buf.ReadInt16() + dataFormat: DataFormatUtils.Create(buf.ReadInt16()) ); _nameIndex.TryAdd(field.Name, i); @@ -77,9 +91,9 @@ internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, TypeMapper typeMapper) } internal static RowDescriptionMessage CreateForReplication( - TypeMapper typeMapper, uint tableOID, FormatCode formatCode, IReadOnlyList columns) + PgSerializerOptions options, uint tableOID, DataFormat dataFormat, IReadOnlyList columns) { - var msg = new RowDescriptionMessage(columns.Count); + var msg = new RowDescriptionMessage(false, columns.Count); var numFields = msg.Count = columns.Count; for (var i = 0; i < numFields; ++i) @@ -88,14 +102,14 @@ internal static RowDescriptionMessage CreateForReplication( var column = columns[i]; field.Populate( - typeMapper, - name: column.ColumnName, - tableOID: tableOID, + options, + name: column.ColumnName, + tableOID: tableOID, columnAttributeNumber: checked((short)i), - oid: column.DataTypeId, - typeSize: 0, // TODO: Confirm we don't have this in replication - typeModifier: column.TypeModifier, - formatCode: formatCode + oid: column.DataTypeId, + typeSize: 0, // TODO: Confirm we don't have this in replication + typeModifier: column.TypeModifier, + dataFormat: dataFormat ); if (!msg._nameIndex.ContainsKey(field.Name)) @@ -105,29 +119,59 @@ internal static RowDescriptionMessage CreateForReplication( return msg; } - public FieldDescription this[int index] + public FieldDescription this[int ordinal] { + [MethodImpl(MethodImplOptions.AggressiveInlining)] get { - Debug.Assert(index < Count); - Debug.Assert(_fields[index] != null); + if ((uint)ordinal >= (uint)Count) + { + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", Count); + return default!; + } + + Debug.Assert(_fields[ordinal] != null); + return _fields[ordinal]!; + } + } + + 
[MethodImpl(MethodImplOptions.NoInlining)] + internal void GetConversionContext(int ordinal, Type type, ref ReadConversionContext result) + => this[ordinal].GetConversionContext(type, ref result); - return _fields[index]!; + internal void SetColumnInfoCache(ReadOnlySpan values) + { + if (_connectorOwned || _lastConverterInfoCache is not null) + return; + Interlocked.CompareExchange(ref _lastConverterInfoCache, values.ToArray(), null); + } + + internal void LoadColumnInfoCache(PgSerializerOptions options, ReadConversionContext[] values) + { + if (_lastConverterInfoCache is not { } cache) + return; + + // If the options have changed (for instance due to ReloadTypes) we need to invalidate the cache. + if (Count > 0 && !ReferenceEquals(options, _fields[0]!._serializerOptions)) + { + Interlocked.CompareExchange(ref _lastConverterInfoCache, null, cache); + return; } + + cache.CopyTo(values.AsSpan()); } public int Count { get; private set; } - public IEnumerator GetEnumerator() => new Enumerator(this); - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - /// /// Given a string name, returns the field's ordinal index in the row. /// internal int GetFieldIndex(string name) - => TryGetFieldIndex(name, out var ret) - ? ret - : throw new IndexOutOfRangeException("Field not found in row: " + name); + { + if (!TryGetFieldIndex(name, out var ret)) + ThrowHelper.ThrowIndexOutOfRangeException($"Field not found in row: {name}"); + return ret; + } /// /// Given a string name, returns the field's ordinal index in the row. 
@@ -140,7 +184,7 @@ internal bool TryGetFieldIndex(string name, out int fieldIndex) if (_insensitiveIndex is null || _insensitiveIndex.Count == 0) { if (_insensitiveIndex == null) - _insensitiveIndex = new Dictionary(InsensitiveComparer.Instance); + _insensitiveIndex = new Dictionary(InvariantIgnoreCaseAndKanaWidthComparer); foreach (var kv in _nameIndex) _insensitiveIndex.TryAdd(kv.Key, kv.Value); @@ -152,50 +196,6 @@ internal bool TryGetFieldIndex(string name, out int fieldIndex) public BackendMessageCode Code => BackendMessageCode.RowDescription; internal RowDescriptionMessage Clone() => new(this); - - /// - /// Comparer that's case-insensitive and Kana width-insensitive - /// - sealed class InsensitiveComparer : IEqualityComparer - { - public static readonly InsensitiveComparer Instance = new(); - static readonly CompareInfo CompareInfo = CultureInfo.InvariantCulture.CompareInfo; - - InsensitiveComparer() {} - - // We should really have CompareOptions.IgnoreKanaType here, but see - // https://github.com/dotnet/corefx/issues/12518#issuecomment-389658716 - public bool Equals(string? x, string? y) - => CompareInfo.Compare(x, y, CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType) == 0; - - public int GetHashCode(string o) - => CompareInfo.GetSortKey(o, CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType).GetHashCode(); - } - - sealed class Enumerator : IEnumerator - { - readonly RowDescriptionMessage _rowDescription; - int _pos = -1; - - public Enumerator(RowDescriptionMessage rowDescription) - => _rowDescription = rowDescription; - - public FieldDescription Current - => _pos >= 0 ? 
_rowDescription[_pos] : throw new InvalidOperationException(); - - object IEnumerator.Current => Current; - - public bool MoveNext() - { - if (_pos == _rowDescription.Count - 1) - return false; - _pos++; - return true; - } - - public void Reset() => _pos = -1; - public void Dispose() {} - } } /// @@ -205,14 +205,14 @@ public void Dispose() {} public sealed class FieldDescription { #pragma warning disable CS8618 // Lazy-initialized type - internal FieldDescription() {} + internal FieldDescription() { } internal FieldDescription(uint oid) - : this("?", 0, 0, oid, 0, 0, FormatCode.Binary) {} + : this("?", 0, 0, oid, 0, 0, DataFormat.Binary) { } internal FieldDescription( string name, uint tableOID, short columnAttributeNumber, - uint oid, short typeSize, int typeModifier, FormatCode formatCode) + uint oid, short typeSize, int typeModifier, DataFormat dataFormat) { Name = name; TableOID = tableOID; @@ -220,38 +220,41 @@ internal FieldDescription( TypeOID = oid; TypeSize = typeSize; TypeModifier = typeModifier; - FormatCode = formatCode; + DataFormat = dataFormat; } #pragma warning restore CS8618 internal FieldDescription(FieldDescription source) { - _typeMapper = source._typeMapper; + _serializerOptions = source._serializerOptions; Name = source.Name; TableOID = source.TableOID; ColumnAttributeNumber = source.ColumnAttributeNumber; TypeOID = source.TypeOID; TypeSize = source.TypeSize; TypeModifier = source.TypeModifier; - FormatCode = source.FormatCode; - Handler = source.Handler; + DataFormat = source.DataFormat; + PostgresType = source.PostgresType; + Field = source.Field; + _objectConversionContext = source._objectConversionContext; } internal void Populate( - TypeMapper typeMapper, string name, uint tableOID, short columnAttributeNumber, - uint oid, short typeSize, int typeModifier, FormatCode formatCode + PgSerializerOptions serializerOptions, string name, uint tableOID, short columnAttributeNumber, + uint oid, short typeSize, int typeModifier, DataFormat 
dataFormat ) { - _typeMapper = typeMapper; + _serializerOptions = serializerOptions; Name = name; TableOID = tableOID; ColumnAttributeNumber = columnAttributeNumber; TypeOID = oid; TypeSize = typeSize; TypeModifier = typeModifier; - FormatCode = formatCode; - - ResolveHandler(); + DataFormat = dataFormat; + PostgresType = _serializerOptions.DatabaseInfo.FindPostgresType((Oid)TypeOID)?.GetRepresentationalType() ?? UnknownBackendType.Instance; + Field = new(Name, _serializerOptions.ToCanonicalTypeId(PostgresType), TypeModifier); + _objectConversionContext = default; } /// @@ -286,43 +289,123 @@ internal void Populate( /// /// The format code being used for the field. - /// Currently will be zero (text) or one (binary). + /// Currently will be text or binary. /// In a RowDescription returned from the statement variant of Describe, the format code is not yet known and will always be zero. /// - internal FormatCode FormatCode { get; set; } - - internal string TypeDisplayName => PostgresType.GetDisplayNameWithFacets(TypeModifier); + internal DataFormat DataFormat { get; set; } /// - /// The Npgsql type handler assigned to handle this field. - /// Returns for fields with format text. + /// Whether this field's data was requested in text format because the user opted into UnknownResultType + /// (via NpgsqlCommand.UnknownResultTypeList or AllResultTypesAreUnknown). Bindings for such fields are + /// expected to reinterpret the text bytes through a converter that could potentially only support binary formats. /// - internal NpgsqlTypeHandler Handler { get; private set; } + /// + /// DataFormat.Text today exclusively signals that we executed with an UnknownResultTypeList. + /// If we ever want to fully support DataFormat.Text we'll need to flow UnknownResultType status separately. 
+ /// + internal bool IsUnknownResultType => DataFormat is DataFormat.Text; + + internal Field Field { get; private set; } + + internal string TypeDisplayName => PostgresType.GetDisplayNameWithFacets(TypeModifier); - internal PostgresType PostgresType - => _typeMapper.DatabaseInfo.ByOID.TryGetValue(TypeOID, out var postgresType) - ? postgresType - : UnknownBackendType.Instance; + internal PostgresType PostgresType { get; private set; } - internal Type FieldType => Handler.GetFieldType(this); + internal Type FieldType => ObjectConversionContext.TypeInfo.Type; - internal void ResolveHandler() - => Handler = IsBinaryFormat ? _typeMapper.ResolveByOID(TypeOID) : _typeMapper.UnrecognizedTypeHandler; + ReadConversionContext _objectConversionContext; + internal ReadConversionContext ObjectConversionContext + { + get + { + if (!_objectConversionContext.IsDefault) + return _objectConversionContext; - TypeMapper _typeMapper; + GetInfoAndBind(null, ref _objectConversionContext); + return _objectConversionContext; + } + } - internal bool IsBinaryFormat => FormatCode == FormatCode.Binary; - internal bool IsTextFormat => FormatCode == FormatCode.Text; + internal PgSerializerOptions _serializerOptions; internal FieldDescription Clone() { - var field = new FieldDescription(this); - field.ResolveHandler(); + var field = new FieldDescription(this); return field; } + internal void GetConversionContext(Type type, ref ReadConversionContext result) => GetInfoAndBind(type, ref result); + void GetInfoAndBind(Type? 
type, ref ReadConversionContext result) + { + Debug.Assert(result.IsDefault || ( + ReferenceEquals(_serializerOptions, result.TypeInfo.Options) && ( + IsUnknownResultType && result.TypeInfo.PgTypeId == _serializerOptions.TextPgTypeId || + // Normal resolution + result.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)) + ), "Cache is bleeding over"); + + if (result is { IsDefault: false, TypeInfo.Type: var typeToConvert } && typeToConvert == type) + return; + + var objectInfo = DataFormat is DataFormat.Text && type is not null ? ObjectConversionContext : _objectConversionContext; + if (objectInfo.TypeInfo is not null && (typeof(object) == type || objectInfo.TypeInfo.Type == type)) + { + result = objectInfo; + return; + } + + Core(type, out result); + if (!result.IsDefault && result.Binding.DataFormat != DataFormat) + ThrowHelper.ThrowInvalidOperationException( + $"Binding for column '{Name}' produced format '{result.Binding.DataFormat}' but the field format is '{DataFormat}'."); + + [MethodImpl(MethodImplOptions.NoInlining)] + void Core(Type? type, out ReadConversionContext lastReadConversionContext) + { + PgFieldBinding binding; + switch (DataFormat) + { + case DataFormat.Text when IsUnknownResultType: + { + // Resolve the converter against pg_catalog.text, UnknownResultType reads text bytes + // for any column type. Every pg_catalog.text mapping we own declares text-format support, so a converter that + // can't bind to text here throws and surfaces as a missing mapping rather than getting silently reinterpreted. + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? 
typeof(string), _serializerOptions.TextPgTypeId, _serializerOptions); + var concreteTypeInfo = typeInfo.MakeConcreteForField(Field); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, _serializerOptions, _serializerOptions.TextPgTypeId, resolved: true); + + binding = concreteTypeInfo.BindField(DataFormat.Text); + lastReadConversionContext = new(concreteTypeInfo, binding); + break; + } + case DataFormat.Binary or DataFormat.Text: + { + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), _serializerOptions.ToCanonicalTypeId(PostgresType), _serializerOptions); + var concreteTypeInfo = typeInfo.MakeConcreteForField(Field); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, _serializerOptions, _serializerOptions.ToCanonicalTypeId(PostgresType), resolved: true); + + // If we don't support the DataFormat we'll just throw. + binding = concreteTypeInfo.BindField(DataFormat); + lastReadConversionContext = new(concreteTypeInfo, binding); + break; + } + default: + ThrowHelper.ThrowUnreachableException("Unknown data format {0}", DataFormat); + lastReadConversionContext = default; + break; + } + + // We delay initializing ObjectOrDefaultInfo until after the first lookup (unless it is itself the first lookup). + // When passed in an unsupported type it allows the error to be more specific, instead of just having object/null to deal with. + if (_objectConversionContext.TypeInfo is null && type is not null) + _ = ObjectConversionContext; + } + } + /// /// Returns a string that represents the current object. /// - public override string ToString() => Name + (Handler == null ? 
"" : $"({Handler.PgDisplayName})"); + public override string ToString() => Name + $"({PostgresType.DisplayName})"; } diff --git a/src/Npgsql/GlobalSuppressions.cs b/src/Npgsql/GlobalSuppressions.cs index 07bef71ab3..6507c11514 100644 --- a/src/Npgsql/GlobalSuppressions.cs +++ b/src/Npgsql/GlobalSuppressions.cs @@ -1,7 +1,7 @@ - -// This file is used by Code Analysis to maintain SuppressMessage + +// This file is used by Code Analysis to maintain SuppressMessage // attributes that are applied to this project. -// Project-level suppressions either have no target or are given +// Project-level suppressions either have no target or are given // a specific target and scoped to a namespace, type, member, etc. using System.Diagnostics.CodeAnalysis; @@ -10,6 +10,5 @@ [assembly: SuppressMessage("Design", "CA1032:Implement standard exception constructors", Justification = "We have several exception classes where this makes no sense")] [assembly: SuppressMessage("Design", "CA1710:Identifiers should have correct suffix", Justification = "Disagree")] [assembly: SuppressMessage("Design", "CA1707:Remove the underscores from member name", Justification = "Seems to cause some false positives on implicit/explicit cast operators, strange")] -[assembly: SuppressMessage("Reliability", "CA2007:Do not directly await a Task", Justification = "Npgsql uses NoSynchronizationContextScope instead of ConfigureAwait(false)")] [assembly: SuppressMessage("Style", "IDE1006:Naming Styles", Justification = "All I/O methods are both sync and async, avoid clutter")] diff --git a/src/Npgsql/ICancelable.cs b/src/Npgsql/ICancelable.cs index 460f17c171..27fe829563 100644 --- a/src/Npgsql/ICancelable.cs +++ b/src/Npgsql/ICancelable.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading.Tasks; namespace Npgsql; diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs new file mode 100644 index 0000000000..16bfcec3fc --- /dev/null +++ 
b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -0,0 +1,71 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal; + +static class AdoSerializerHelpers +{ + public static PgTypeInfo GetTypeInfoForReading(Type type, PgTypeId pgTypeId, PgSerializerOptions options) + { + PgTypeInfo? typeInfo = null; + Exception? inner = null; + try + { + typeInfo = options.GetTypeInfoInternal(type, pgTypeId); + } + catch (Exception ex) + { + inner = ex; + } + return typeInfo ?? ThrowReadingNotSupported(type, options, pgTypeId, inner); + } + + public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, PgSerializerOptions options, NpgsqlDbType? npgsqlDbType = null) + { + Debug.Assert(type != typeof(object), "Parameters of type object are not supported."); + + PgTypeInfo? typeInfo = null; + Exception? inner = null; + try + { + typeInfo = options.GetTypeInfoInternal(type, pgTypeId); + } + catch (Exception ex) + { + inner = ex; + } + return typeInfo ?? ThrowWritingNotSupported(type, options, pgTypeId, npgsqlDbType, inner: inner); + } + + // InvalidCastException thrown to align with ADO.NET convention. + // resolved=true distinguishes the "resolution succeeded but the resolved converter opted out of this + // direction" case (e.g. read-only converters) from the "no converter could be found / resolution threw" + // case — important for diagnosing user reports. + [DoesNotReturn] + internal static PgTypeInfo ThrowReadingNotSupported(Type? type, PgSerializerOptions options, PgTypeId pgTypeId, Exception? inner = null, bool resolved = false) + { + var typeFragment = type is null ? "" : $" as '{type.FullName}'{(resolved ? " (resolved)" : "")}"; + var dataTypeNameFragment = $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId)?.DisplayName ?? "unknown"}'"; + var innerHint = inner is null ? 
"" : " See the inner exception for details."; + + throw new InvalidCastException($"Reading{typeFragment} is not supported for fields having {dataTypeNameFragment}.{innerHint}", inner); + } + + [DoesNotReturn] + internal static PgTypeInfo ThrowWritingNotSupported(Type? type, PgSerializerOptions options, PgTypeId? pgTypeId, NpgsqlDbType? npgsqlDbType = null, string? parameterName = null, Exception? inner = null, bool resolved = false) + { + var pgTypeFragment = pgTypeId is null + ? "no NpgsqlDbType or DataTypeName. Try setting one of these values to the expected database type." + : npgsqlDbType is null + ? $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId.GetValueOrDefault())?.DisplayName ?? "unknown"}'" + : $"NpgsqlDbType '{npgsqlDbType}'"; + var parameterFragment = parameterName is null ? "parameters" : $"parameter '{parameterName}'"; + var typeFragment = type is null ? "" : $" values of type '{type.FullName}'{(resolved ? " (resolved)" : "")}"; + var innerHint = inner is null ? 
"" : " See the inner exception for details."; + + throw new InvalidCastException($"Writing{typeFragment} is not supported for {parameterFragment} having {pgTypeFragment}.{innerHint}", inner); + } +} diff --git a/src/Npgsql/Internal/BufferRequirements.cs b/src/Npgsql/Internal/BufferRequirements.cs new file mode 100644 index 0000000000..9551687426 --- /dev/null +++ b/src/Npgsql/Internal/BufferRequirements.cs @@ -0,0 +1,68 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public readonly struct BufferRequirements : IEquatable +{ + readonly Size _read; + readonly Size _write; + + BufferRequirements(Size read, Size write) + { + _read = read; + _write = write; + } + + public Size Read => _read; + public Size Write => _write; + + /// Streaming + public static BufferRequirements None => new(Size.Unknown, Size.Unknown); + /// Entire value should be buffered + public static BufferRequirements Value => new(Size.CreateUpperBound(int.MaxValue), Size.CreateUpperBound(int.MaxValue)); + /// Fixed size value should be buffered + public static BufferRequirements CreateFixedSize(int byteCount) => new(byteCount, byteCount); + /// Custom requirements + public static BufferRequirements Create(Size value) => new(value, value); + public static BufferRequirements Create(Size read, Size write) => new(read, write); + + public BufferRequirements Combine(Size read, Size write) + => new(_read.Combine(read), _write.Combine(write)); + + public BufferRequirements Combine(BufferRequirements other) + => Combine(other._read, other._write); + + public BufferRequirements Combine(int byteCount) + => Combine(CreateFixedSize(byteCount)); + + public bool Equals(BufferRequirements other) => _read.Equals(other._read) && _write.Equals(other._write); + public override bool Equals(object? 
obj) => obj is BufferRequirements other && Equals(other); + public override int GetHashCode() => HashCode.Combine(_read, _write); + public static bool operator ==(BufferRequirements left, BufferRequirements right) => left.Equals(right); + public static bool operator !=(BufferRequirements left, BufferRequirements right) => !left.Equals(right); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static int GetMinimumBufferByteCount(Size bufferRequirement, int valueSize) + { + ArgumentOutOfRangeException.ThrowIfNegative(valueSize); + var reqByteCount = bufferRequirement.GetValueOrDefault(); + switch (bufferRequirement.Kind) + { + case SizeKind.Exact: + if (reqByteCount != valueSize) + ThrowExactMismatch(reqByteCount, valueSize); + goto default; + case SizeKind.UpperBound: + return Math.Min(valueSize, reqByteCount); + default: + return reqByteCount; + } + + static void ThrowExactMismatch(int expected, int actual) + => throw new ArgumentOutOfRangeException(nameof(bufferRequirement), + $"Exact buffer requirement size ({expected} bytes) does not match the value size ({actual} bytes)."); + } +} diff --git a/src/Npgsql/Internal/ChainDbTypeResolver.cs b/src/Npgsql/Internal/ChainDbTypeResolver.cs new file mode 100644 index 0000000000..16f3c229ee --- /dev/null +++ b/src/Npgsql/Internal/ChainDbTypeResolver.cs @@ -0,0 +1,33 @@ +using System; +using System.Collections.Generic; +using System.Data; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class ChainDbTypeResolver(IEnumerable resolvers) : IDbTypeResolver +{ + readonly IDbTypeResolver[] _resolvers = new List(resolvers).ToArray(); + + public string? GetDataTypeName(DbType dbType, Type? type) + { + foreach (var resolver in _resolvers) + { + if (resolver.GetDataTypeName(dbType, type) is { } dataTypeName) + return dataTypeName; + } + + return null; + } + + public DbType? 
GetDbType(DataTypeName dataTypeName) + { + foreach (var resolver in _resolvers) + { + if (resolver.GetDbType(dataTypeName) is { } dbType) + return dbType; + } + + return null; + } +} diff --git a/src/Npgsql/Internal/ChainTypeInfoResolver.cs b/src/Npgsql/Internal/ChainTypeInfoResolver.cs new file mode 100644 index 0000000000..4c7f56e454 --- /dev/null +++ b/src/Npgsql/Internal/ChainTypeInfoResolver.cs @@ -0,0 +1,21 @@ +using System; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class ChainTypeInfoResolver(IEnumerable resolvers) : IPgTypeInfoResolver +{ + readonly IPgTypeInfoResolver[] _resolvers = new List(resolvers).ToArray(); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + foreach (var resolver in _resolvers) + { + if (resolver.GetTypeInfo(type, dataTypeName, options) is { } info) + return info; + } + + return null; + } +} diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs new file mode 100644 index 0000000000..0917dfd834 --- /dev/null +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs @@ -0,0 +1,103 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using Npgsql.Util; + +namespace Npgsql.Internal.Composites; + +abstract class CompositeBuilder(StrongBox[] tempBoxes, IReadOnlyList fields) +{ + protected readonly StrongBox[] _tempBoxes = tempBoxes; + protected readonly IReadOnlyList _fields = fields; + protected int _currentField; + protected object? 
_boxedInstance; + + protected abstract void Construct(); + + public void AddValue(TValue value) + { + var tempBoxes = _tempBoxes; + var currentField = _currentField; + if (currentField >= tempBoxes.Length) + { + if (currentField == tempBoxes.Length) + Construct(); + SetField(value); + } + else + { + ((StrongBox)tempBoxes[currentField]).TypedValue = value; + if (currentField + 1 == tempBoxes.Length) + Construct(); + } + + _currentField++; + + void SetField(TValue value) + { + if (_boxedInstance is null) + ThrowHelper.ThrowInvalidOperationException("Not constructed yet, or no more fields were expected."); + + var currentField = _currentField; + var fields = _fields; + if (currentField > fields.Count - 1) + ThrowHelper.ThrowIndexOutOfRangeException($"Cannot set field {value} at position {currentField} - all fields have already been set"); + + ((CompositeFieldInfo)fields[currentField]).Set(_boxedInstance, value); + } + } +} + +sealed class CompositeBuilder(CompositeInfo compositeInfo) : CompositeBuilder(compositeInfo.CreateTempBoxes(), compositeInfo.Fields), IDisposable +{ + T _instance = default!; + + public T Complete() + { + if (_currentField < compositeInfo.Fields.Count) + throw new InvalidOperationException($"Missing values, expected: {compositeInfo.Fields.Count} got: {_currentField}"); + + return (T)(_boxedInstance ?? 
_instance!); + } + + protected override void Construct() + { + var tempBoxes = _tempBoxes; + if (_currentField < tempBoxes.Length - 1) + throw new InvalidOperationException($"Missing values, expected: {tempBoxes.Length} got: {_currentField + 1}"); + + var fields = compositeInfo.Fields; + var args = ArrayPool.Shared.Rent(compositeInfo.ConstructorParameters); + for (var i = 0; i < tempBoxes.Length; i++) + { + var field = fields[i]; + if (field.ConstructorParameterIndex is { } argIndex) + args[argIndex] = tempBoxes[i]; + } + _instance = compositeInfo.Constructor(args)!; + ArrayPool.Shared.Return(args, clearArray: true); + + if (tempBoxes.Length == compositeInfo.Fields.Count) + return; + + // We're expecting or already have stored more fields, so box the instance once here. + _boxedInstance = _instance; + for (var i = 0; i < tempBoxes.Length; i++) + { + var field = compositeInfo.Fields[i]; + if (field.ConstructorParameterIndex is null) + field.Set(_boxedInstance, tempBoxes[i]); + } + } + + public void Reset() + { + _instance = default!; + _boxedInstance = null; + _currentField = 0; + foreach (var box in _tempBoxes) + box.Clear(); + } + + public void Dispose() { } +} diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs new file mode 100644 index 0000000000..b6f41a8cc3 --- /dev/null +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -0,0 +1,308 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using Npgsql.Util; + +namespace Npgsql.Internal.Composites; + +abstract class CompositeFieldInfo +{ + protected PgTypeInfo PgTypeInfo { get; } + protected PgConverter? Converter { get; } + protected BufferRequirements _binaryBufferRequirements; + + /// + /// CompositeFieldInfo constructor. 
+ /// + /// Name of the field. + /// Type info for reading/writing. + /// The nominal field type, this may differ from the typeInfo.PgTypeId when the field is a domain type. + private protected CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId) + { + Name = name; + PgTypeInfo = typeInfo; + PgTypeId = nominalPgTypeId; + + if (typeInfo.PgTypeId is null) + ThrowHelper.ThrowArgumentException("Type info cannot have an undecided PgTypeId.", nameof(typeInfo)); + + PgConcreteTypeInfo concrete; + if (typeInfo is PgConcreteTypeInfo direct) + { + concrete = direct; + } + else if (typeInfo is PgProviderTypeInfo providerTypeInfo) + { + // Lift the default concrete's buffer requirements and converter so the composite gets an + // accurate per-field size even when resolution is deferred. IsProviderBacked still signals that + // GetWriteInfo / GetSize must go through BindValue for per-value dispatch at bind time — + // that's where provider-backed fields (DateTime kind, late-bound, etc.) surface deterministic + // errors. The cached default is reused by GetDefaultWriteInfo on CompositeConverter's Path A, + // where per-value resolution has already completed without producing state. 
+ concrete = providerTypeInfo.GetDefault(null); + IsProviderBacked = true; + } + else + { + ThrowHelper.ThrowInvalidOperationException($"Unsupported {nameof(PgTypeInfo)} '{typeInfo.GetType().FullName}' for composite field '{name}'."); + return; + } + + if (!concrete.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + { + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + return; + } + _binaryBufferRequirements = bufferRequirements; + Converter = concrete.Converter; + } + + public PgConverter GetReadInfo(out Size readRequirement) + { + if (!IsProviderBacked) + { + readRequirement = _binaryBufferRequirements.Read; + return Converter; + } + + var concreteTypeInfo = PgTypeInfo.MakeConcreteForField(new Field(Name, PgTypeInfo.PgTypeId.GetValueOrDefault(), -1)); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(PgTypeInfo.Type, PgTypeInfo.Options, concreteTypeInfo.PgTypeId, resolved: true); + if (!concreteTypeInfo.TryBindField(DataFormat.Binary, out var binding)) + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + + readRequirement = binding.BufferRequirement; + return concreteTypeInfo.Converter; + } + + public PgConverter GetWriteInfo(object instance, out Size writeRequirement, out object? writeState) + { + if (!IsProviderBacked) + { + writeState = null; + writeRequirement = _binaryBufferRequirements.Write; + return Converter; + } + + return BindValue(instance, out writeRequirement, out writeState); + } + + /// + /// Returns a deterministic write converter for this field without running per-value dispatch — + /// for concrete fields the one-and-only converter, for provider fields the default concrete that + /// was resolved at construction. 
Used by CompositeConverter.Write's Path A, which only runs when + /// bind-time GetSize has already completed and produced no per-field state; the default converter + /// writes the same bytes as any value-dispatched variant for a decided field id and carries no + /// state to dispose. + /// + public PgConverter GetDefaultWriteInfo(out Size writeRequirement) + { + Debug.Assert(Converter is not null); + writeRequirement = _binaryBufferRequirements.Write; + return Converter; + } + + protected ValueTask ReadAsObject(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken) + { + if (async) + { + var task = converter.ReadAsObjectAsync(reader, cancellationToken); + if (!task.IsCompletedSuccessfully) + return Core(builder, task); + + AddValue(builder, task.Result); + } + else + AddValue(builder, converter.ReadAsObject(reader)); + return new(); + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + async ValueTask Core(CompositeBuilder builder, ValueTask task) + { + builder.AddValue(await task.ConfigureAwait(false)); + } + } + + protected ValueTask WriteAsObject(bool async, PgConverter converter, PgWriter writer, object value, CancellationToken cancellationToken) + { + if (async) + return converter.WriteAsObjectAsync(writer, value, cancellationToken); + + converter.WriteAsObject(writer, value); + return new(); + } + + public string Name { get; } + public PgTypeId PgTypeId { get; } + public Size BinaryReadRequirement => _binaryBufferRequirements.Read; + public Size BinaryWriteRequirement => _binaryBufferRequirements.Write; + + /// True when this field defers converter resolution to bind time via a provider. + [MemberNotNullWhen(false, nameof(Converter))] + public bool IsProviderBacked { get; } + + public abstract Type Type { get; } + + protected abstract PgConverter BindValue(object instance, out Size writeRequirement, out object? 
writeState); + protected abstract void AddValue(CompositeBuilder builder, object value); + + public abstract StrongBox CreateBox(); + public abstract void Set(object instance, StrongBox value); + public abstract int? ConstructorParameterIndex { get; } + public abstract bool IsDbNullable { get; } + + public abstract void ReadDbNull(CompositeBuilder builder); + public abstract ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); + public abstract bool IsDbNull(PgConverter converter, object instance, object? writeState); + public abstract Size? IsDbNullOrGetSize(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState); + public abstract ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken); +} + +sealed class CompositeFieldInfo : CompositeFieldInfo +{ + readonly Action? _setter; + readonly int _parameterIndex; + readonly Func _getter; + readonly bool _asObject; + + CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter) + : base(name, typeInfo, nominalPgTypeId) + { + if (typeInfo.Type != typeof(T)) + ThrowHelper.ThrowInvalidOperationException($"PgTypeInfo type '{typeInfo.Type.FullName}' must be equal to field type '{typeof(T)}'."); + + // Converter is populated by the base constructor for both concrete and provider type infos — + // for providers it holds the default concrete's converter. _asObject is derived from it and is + // used by AsObject's fast path when the runtime converter matches the cached default. 
+ if (Converter is not null) + { + var typeToConvert = Converter.TypeToConvert; + _asObject = typeToConvert != typeof(T); + if (!typeToConvert.IsAssignableFrom(typeof(T))) + ThrowHelper.ThrowInvalidOperationException($"Converter type '{typeToConvert.FullName}' must be assignable from field type '{typeof(T)}'."); + } + + _getter = getter; + } + + // Accessed through reflection (ReflectionCompositeInfoFactory) + public CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter, int parameterIndex) + : this(name, typeInfo, nominalPgTypeId, getter) + => _parameterIndex = parameterIndex; + + // Accessed through reflection (ReflectionCompositeInfoFactory) + public CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter, Action setter) + : this(name, typeInfo, nominalPgTypeId, getter) + => _setter = setter; + + bool AsObject(PgConverter converter) + => ReferenceEquals(Converter, converter) ? _asObject : converter.TypeToConvert != typeof(T); + + public override Type Type => typeof(T); + + public override int? ConstructorParameterIndex => _setter is not null ? 
null : _parameterIndex; + + public T Get(object instance) => _getter(instance); + + public override StrongBox CreateBox() => new Util.StrongBox(); + + public void Set(object instance, T value) + { + if (_setter is null) + ThrowHelper.ThrowInvalidOperationException("Not a composite field for a clr field."); + + _setter(instance, value); + } + + public override void Set(object instance, StrongBox value) + { + if (_setter is null) + ThrowHelper.ThrowInvalidOperationException("Not a composite field for a clr field."); + + _setter(instance, ((Util.StrongBox)value).TypedValue!); + } + + public override void ReadDbNull(CompositeBuilder builder) + { + if (default(T) != null) + ThrowHelper.ThrowInvalidCastException($"Type {typeof(T).FullName} does not have null as a possible value."); + + builder.AddValue((T?)default); + } + + protected override PgConverter BindValue(object instance, out Size writeRequirement, out object? writeState) + { + var value = _getter(instance); + var concreteTypeInfo = PgTypeInfo.MakeConcreteForValue(value, out writeState); + if (!concreteTypeInfo.SupportsWriting) + AdoSerializerHelpers.ThrowWritingNotSupported(typeof(T), PgTypeInfo.Options, concreteTypeInfo.PgTypeId, resolved: true); + if (!concreteTypeInfo.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + { + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + writeRequirement = default; + return default; + } + + writeRequirement = bufferRequirements.Write; + return concreteTypeInfo.Converter; + } + + protected override void AddValue(CompositeBuilder builder, object value) => builder.AddValue((T)value); + + public override ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default) + { + if (AsObject(converter)) + return ReadAsObject(async, converter, builder, reader, cancellationToken); + + if (async) + { + var task = 
((PgConverter)converter).ReadAsync(reader, cancellationToken); + if (!task.IsCompletedSuccessfully) + return Core(builder, task); + + builder.AddValue(task.Result); + } + else + builder.AddValue(((PgConverter)converter).Read(reader)); + return new(); + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + async ValueTask Core(CompositeBuilder builder, ValueTask task) + { + builder.AddValue(await task.ConfigureAwait(false)); + } + } + + public override bool IsDbNullable => Converter?.IsDbNullable ?? true; + + public override bool IsDbNull(PgConverter converter, object instance, object? writeState) + { + var value = _getter(instance); + return AsObject(converter) ? converter.IsDbNullAsObject(value, writeState) : ((PgConverter)converter).IsDbNull(value, writeState); + } + + public override Size? IsDbNullOrGetSize(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState) + { + var value = _getter(instance); + return AsObject(converter) + ? 
converter.IsDbNullOrGetSizeAsObject(format, writeRequirement, value, ref writeState) + : ((PgConverter)converter).IsDbNullOrGetSize(format, writeRequirement, value, ref writeState); + } + + public override ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken) + { + var value = _getter(instance); + if (AsObject(converter)) + return WriteAsObject(async, converter, writer, value!, cancellationToken); + + if (async) + return ((PgConverter)converter).WriteAsync(writer, value!, cancellationToken); + + ((PgConverter)converter).Write(writer, value!); + return new(); + } +} diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs new file mode 100644 index 0000000000..f1e291cf53 --- /dev/null +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs @@ -0,0 +1,54 @@ +using System; +using System.Collections.Generic; +using Npgsql.Util; + +namespace Npgsql.Internal.Composites; + +sealed class CompositeInfo +{ + readonly int _lastConstructorFieldIndex; + readonly CompositeFieldInfo[] _fields; + + public CompositeInfo(CompositeFieldInfo[] fields, int constructorParameters, Func constructor) + { + _lastConstructorFieldIndex = -1; + var constructorFields = 0; + for (var i = 0; i < fields.Length; i++) + { + if (fields[i].ConstructorParameterIndex is not null) + { + _lastConstructorFieldIndex = i; + constructorFields++; + } + } + + if (constructorParameters != constructorFields) + throw new InvalidOperationException($"Missing composite fields to map to the required {constructorParameters} constructor parameters."); + + _fields = fields; + Constructor = constructor; + ConstructorParameters = constructorParameters; + } + + public IReadOnlyList Fields => _fields; + + public int ConstructorParameters { get; } + public Func Constructor { get; } + + /// + /// Create temporary storage for all values that come before the constructor parameters can 
be saturated. + /// + /// + public StrongBox[] CreateTempBoxes() + { + if (_lastConstructorFieldIndex is -1) + return []; + + var boxes = new StrongBox[_lastConstructorFieldIndex + 1]; + var fields = _fields; + for (var i = 0; i < boxes.Length; i++) + boxes[i] = fields[i].CreateBox(); + + return boxes; + } +} diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs new file mode 100644 index 0000000000..c520c4fdf9 --- /dev/null +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -0,0 +1,299 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Linq.Expressions; +using System.Reflection; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using Npgsql.Util; +using NpgsqlTypes; + +namespace Npgsql.Internal.Composites; + +[RequiresDynamicCode("Serializing arbitrary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] +static class ReflectionCompositeInfoFactory +{ + public static CompositeInfo CreateCompositeInfo<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( + PostgresCompositeType pgType, INpgsqlNameTranslator nameTranslator, PgSerializerOptions options) + { + var pgFields = pgType.Fields; + var propertyMap = MapProperties(pgFields, nameTranslator); + var fieldMap = MapFields(pgFields, nameTranslator); + + var duplicates = propertyMap.Keys.Intersect(fieldMap.Keys).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Property {propertyMap[duplicates[0]].Name} and field {fieldMap[duplicates[0]].Name} map to the same '{pgFields[duplicates[0]].Name}' composite field name."); + + var (constructorInfo, parameterFieldMap) = MapBestMatchingConstructor(pgFields, nameTranslator); + var constructorParameters = constructorInfo?.GetParameters() ?? []; + var compositeFields = new CompositeFieldInfo?[pgFields.Count]; + for (var i = 0; i < parameterFieldMap.Length; i++) + { + var fieldIndex = parameterFieldMap[i]; + var pgField = pgFields[fieldIndex]; + var parameter = constructorParameters[i]; + var reprTypeId = options.ToCanonicalTypeId(pgField.Type.GetRepresentationalType()); + PgTypeInfo pgTypeInfo; + Delegate getter; + if (propertyMap.TryGetValue(fieldIndex, out var property) && property.GetMethod is not null) + { + if (property.PropertyType != parameter.ParameterType) + throw new InvalidOperationException($"Could not find a matching getter for constructor parameter {parameter.Name} and type {parameter.ParameterType} mapped to composite field {pgFields[fieldIndex].Name}."); + + pgTypeInfo = options.GetTypeInfoInternal(property.PropertyType, reprTypeId) ?? 
throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); + getter = CreateGetter(property); + } + else if (fieldMap.TryGetValue(fieldIndex, out var field)) + { + if (field.FieldType != parameter.ParameterType) + throw new InvalidOperationException($"Could not find a matching getter for constructor parameter {parameter.Name} and type {parameter.ParameterType} mapped to composite field {pgFields[fieldIndex].Name}."); + + pgTypeInfo = options.GetTypeInfoInternal(field.FieldType, reprTypeId) ?? throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); + getter = CreateGetter(field); + } + else + throw new InvalidOperationException($"Cannot find property or field for composite field {pgFields[fieldIndex].Name}."); + + compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, pgTypeInfo, options.ToCanonicalTypeId(pgField.Type), getter, i); + } + + for (var fieldIndex = 0; fieldIndex < pgFields.Count; fieldIndex++) + { + // Handled by constructor. + if (compositeFields[fieldIndex] is not null) + continue; + + var pgField = pgFields[fieldIndex]; + var reprTypeId = options.ToCanonicalTypeId(pgField.Type.GetRepresentationalType()); + PgTypeInfo pgTypeInfo; + Delegate getter; + Delegate setter; + if (propertyMap.TryGetValue(fieldIndex, out var property)) + { + pgTypeInfo = options.GetTypeInfoInternal(property.PropertyType, reprTypeId) + ?? throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); + getter = CreateGetter(property); + setter = CreateSetter(property); + } + else if (fieldMap.TryGetValue(fieldIndex, out var field)) + { + pgTypeInfo = options.GetTypeInfoInternal(field.FieldType, reprTypeId) + ?? 
throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); + getter = CreateGetter(field); + setter = CreateSetter(field); + } + else + throw new InvalidOperationException($"Cannot find property or field for composite field '{pgFields[fieldIndex].Name}'."); + + compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, pgTypeInfo, options.ToCanonicalTypeId(pgField.Type), getter, setter); + } + + Debug.Assert(compositeFields.All(x => x is not null)); + + var constructor = constructorInfo is null ? _ => Activator.CreateInstance() : CreateStrongBoxConstructor(constructorInfo); + return new CompositeInfo(compositeFields!, constructorInfo is null ? 0 : constructorParameters.Length, constructor); + + static NotSupportedException NotSupportedField(PostgresCompositeType composite, PostgresCompositeType.Field field, bool isField, string name, Type type) + => new($"No mapping could be found for ('{type.FullName}', '{field.Type.FullName}'). Mapping: CLR {(isField ? 
"field" : "property")} '{typeof(T).FullName}.{name}' <-> Composite field '{composite.Name}.{field.Name}'"); + } + + static Delegate CreateGetter(FieldInfo info) + { + var instance = Expression.Parameter(typeof(object), "instance"); + return Expression + .Lambda(typeof(Func<,>).MakeGenericType(typeof(object), info.FieldType), + Expression.Field(UnboxAny(instance, typeof(T)), info), + instance) + .Compile(); + } + + static Delegate CreateSetter(FieldInfo info) + { + var instance = Expression.Parameter(typeof(object), "instance"); + var value = Expression.Parameter(info.FieldType, "value"); + + return Expression + .Lambda(typeof(Action<,>).MakeGenericType(typeof(object), info.FieldType), + Expression.Assign(Expression.Field(UnboxAny(instance, typeof(T)), info), value), instance, value) + .Compile(); + } + + static Delegate CreateGetter(PropertyInfo info) + { + var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor([typeof(string)])!; + var instance = Expression.Parameter(typeof(object), "instance"); + var body = info.GetMethod is null || !info.GetMethod.IsPublic + ? (Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, + Expression.Constant($"No (public) getter for '{info}' on type {typeof(T)}")), info.PropertyType) + : Expression.Property(UnboxAny(instance, typeof(T)), info); + + return Expression + .Lambda(typeof(Func<,>).MakeGenericType(typeof(object), info.PropertyType), body, instance) + .Compile(); + } + + static Delegate CreateSetter(PropertyInfo info) + { + var instance = Expression.Parameter(typeof(object), "instance"); + var value = Expression.Parameter(info.PropertyType, "value"); + + var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor([typeof(string)])!; + var body = info.SetMethod is null || !info.SetMethod.IsPublic + ? 
(Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, + Expression.Constant($"No (public) setter for '{info}' on type {typeof(T)}")), info.PropertyType) + : Expression.Call(UnboxAny(instance, typeof(T)), info.SetMethod, value); + + return Expression + .Lambda(typeof(Action<,>).MakeGenericType(typeof(object), info.PropertyType), body, instance, value) + .Compile(); + } + + static Expression UnboxAny(Expression expression, Type type) + => type.IsValueType ? Expression.Unbox(expression, type) : Expression.Convert(expression, type, null); + + [DynamicDependency(nameof(StrongBox.TypedValue), typeof(StrongBox<>))] + [DynamicDependency(DynamicallyAccessedMemberTypes.PublicProperties, typeof(StrongBox[]))] + [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "DynamicDependencies in place for the System.Linq.Expression.Property calls")] + static Func CreateStrongBoxConstructor(ConstructorInfo constructorInfo) + { + var values = Expression.Parameter(typeof(StrongBox[]), "values"); + + var parameters = constructorInfo.GetParameters(); + var parameterCount = Expression.Constant(parameters.Length); + var argumentExceptionNameMessageConstructor = typeof(ArgumentException).GetConstructor([typeof(string), typeof(string)])!; + return Expression + .Lambda>( + Expression.Block( + Expression.IfThen( + Expression.LessThan(Expression.Property(values, nameof(Array.Length)), parameterCount), + + Expression.Throw(Expression.New(argumentExceptionNameMessageConstructor, + Expression.Constant("Passed fewer arguments than there are constructor parameters."), Expression.Constant(values.Name))) + ), + Expression.New(constructorInfo, parameters.Select((parameter, i) => + Expression.Property( + UnboxAny( + Expression.ArrayIndex(values, Expression.Constant(i)), + typeof(StrongBox<>).MakeGenericType(parameter.ParameterType) + ), + nameof(StrongBox.TypedValue) + ) + )) + ), values) + .Compile(); + } + static CompositeFieldInfo 
CreateCompositeFieldInfo(string name, Type type, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Delegate getter, int constructorParameterIndex) + => (CompositeFieldInfo)Activator.CreateInstance( + typeof(CompositeFieldInfo<>).MakeGenericType(type), name, typeInfo, nominalPgTypeId, getter, constructorParameterIndex)!; + + static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Delegate getter, Delegate setter) + => (CompositeFieldInfo)Activator.CreateInstance( + typeof(CompositeFieldInfo<>).MakeGenericType(type), name, typeInfo, nominalPgTypeId, getter, setter)!; + + static Dictionary MapProperties<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + { + var properties = typeof(T).GetProperties(BindingFlags.Public | BindingFlags.Instance); + var propertiesAndNames = properties.Select(x => + { + var attr = x.GetCustomAttribute(); + var name = attr?.PgName ?? 
nameTranslator.TranslateMemberName(x.Name); + return new KeyValuePair(name, x); + }).ToArray(); + + var duplicates = propertiesAndNames.GroupBy(x => x.Key).Where(g => g.Count() > 1).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Multiple properties are mapped to the '{duplicates[0].Key}' field."); + + var propertiesMap = propertiesAndNames.ToDictionary(x => x.Key, x => x.Value); + var result = new Dictionary(); + for (var i = 0; i < fields.Count; i++) + { + var field = fields[i]; + if (!propertiesMap.TryGetValue(field.Name, out var value)) + continue; + + result[i] = value; + } + + return result; + } + + static Dictionary MapFields<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + { + var clrFields = typeof(T).GetFields(BindingFlags.Public | BindingFlags.Instance); + var clrFieldsAndNames = clrFields.Select(x => + { + var attr = x.GetCustomAttribute(); + var name = attr?.PgName ?? nameTranslator.TranslateMemberName(x.Name); + return new KeyValuePair(name, x); + }).ToArray(); + + var duplicates = clrFieldsAndNames.GroupBy(x => x.Key).Where(g => g.Count() > 1).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Multiple properties are mapped to the '{duplicates[0].Key}' field."); + + var clrFieldsMap = clrFieldsAndNames.ToDictionary(x => x.Key, x => x.Value); + var result = new Dictionary(); + for (var i = 0; i < fields.Count; i++) + { + var field = fields[i]; + if (!clrFieldsMap.TryGetValue(field.Name, out var value)) + continue; + + result[i] = value; + } + + return result; + } + + static (ConstructorInfo? ConstructorInfo, int[] ParameterFieldMap) MapBestMatchingConstructor<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + { + ConstructorInfo? clrDefaultConstructor = null; + Exception? 
duplicatesException = null; + foreach (var constructor in typeof(T).GetConstructors().OrderByDescending(x => x.GetParameters().Length)) + { + var parameters = constructor.GetParameters(); + if (parameters.Length == 0) + clrDefaultConstructor = constructor; + + var parametersMap = new int[parameters.Length]; + Array.Fill(parametersMap, -1); + for (var i = 0; i < parameters.Length; i++) + { + var clrParameter = parameters[i]; + var attr = clrParameter.GetCustomAttribute(); + var name = attr?.PgName ?? (clrParameter.Name is { } clrName ? nameTranslator.TranslateMemberName(clrName) : null); + if (name is null) + break; + + for (var pgFieldIndex = 0; pgFieldIndex < fields.Count; pgFieldIndex++) + { + if (fields[pgFieldIndex].Name == name) + { + parametersMap[i] = pgFieldIndex; + break; + } + } + } + + if (parametersMap.Any(x => x is -1)) + continue; + + var duplicates = parametersMap.GroupBy(x => x).Where(g => g.Count() > 1).ToArray(); + if (duplicates.Length is 0) + return (constructor, parametersMap); + + duplicatesException = new AmbiguousMatchException($"Multiple parameters are mapped to the field '{fields[duplicates[0].Key].Name}' in constructor: {constructor}."); + } + + if (duplicatesException is not null) + throw duplicatesException; + + if (clrDefaultConstructor is null && !typeof(T).IsValueType) + throw new InvalidOperationException($"No parameterless constructor defined for type '{typeof(T)}'."); + + return (clrDefaultConstructor, []); + } +} diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs new file mode 100644 index 0000000000..8ae0b1e94d --- /dev/null +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -0,0 +1,520 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using 
Npgsql.Util; + +namespace Npgsql.Internal.Converters; + +abstract class ArrayConverter : PgStreamingConverter where T : notnull +{ + readonly ArrayConverterCore _arrayConverterCore; + + private protected ArrayConverter(int? expectedDimensions, PgConcreteTypeInfo elementTypeInfo, int pgLowerBound = 1) + { + if (!elementTypeInfo.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + throw new NotSupportedException("Element converter has to support the binary format to be compatible."); + + _arrayConverterCore = new((IElementOperations)this, elementTypeInfo, elementTypeInfo.Converter.IsDbNullable, expectedDimensions, + bufferRequirements, elementTypeInfo.PgTypeId, pgLowerBound); + } + + public override T Read(PgReader reader) => (T)_arrayConverterCore.Read(async: false, reader).Result; + + public override unsafe ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + // Cheap if we have all the data. + var task = _arrayConverterCore.Read(async: true, reader, cancellationToken); + if (task.IsCompletedSuccessfully) + return new((T)task.Result); + + // Otherwise do these additional allocations (source and task) to allow us to share state machine codegen for all Ts. + // We don't use the PoolingCompletionSource here as it would be backed by an IValueTaskSource. + // Any ReadAsObjectAsync caller would call AsTask() on it immediately, causing another allocation and indirection. + var source = new AsyncHelpers.CompletionSource(); + AsyncHelpers.OnCompletedWithSource(task.AsTask(), source, new(this, &UnboxAndComplete)); + return source.Task; + + static void UnboxAndComplete(Task task, AsyncHelpers.CompletionSource completionSource) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost when T is a value type (like ReadOnlyMemory). + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
+ var result = (T)new ValueTask(Unsafe.As>(task)).Result; + + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(completionSource is AsyncHelpers.CompletionSource); + Unsafe.As>(completionSource).SetResult(result); + } + } + + public override Size GetSize(SizeContext context, T values, ref object? writeState) + => _arrayConverterCore.GetSize(context, values, ref writeState); + + public override void Write(PgWriter writer, T values) + => _arrayConverterCore.Write(async: false, writer, values, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) + => _arrayConverterCore.Write(async: true, writer, values, cancellationToken); + + public static ArrayConverter CreateArrayBased(PgConcreteTypeInfo elementTypeInfo, Type? effectiveType = null, int pgLowerBound = 1) + => new ArrayBased(elementTypeInfo, effectiveType, pgLowerBound); + + public static ArrayConverter CreateListBased(PgConcreteTypeInfo elementTypeInfo, int pgLowerBound = 1) + => new ListBased(elementTypeInfo, pgLowerBound); + + sealed class ArrayBased(PgConcreteTypeInfo elementTypeInfo, Type? effectiveType = null, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, + elementTypeInfo, pgLowerBound), IElementOperations + { + readonly PgConverter _elemConverter = (PgConverter)elementTypeInfo.Converter; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static TElement? GetValue(object collection, IterationIndices indices) + { + Debug.Assert(indices.Rank > 0); + switch (indices.Rank) + { + case 1: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. 
+ Debug.Assert(collection is TElement?[]); + return Unsafe.As(collection)[indices.One]; + case 2: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is TElement?[,]); + return Unsafe.As(collection)[indices.Many![0], indices.Many![1]]; + default: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is Array); + return (TElement?)Unsafe.As(collection).GetValue(indices.Many!); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void SetValue(object collection, IterationIndices indices, TElement? value) + { + Debug.Assert(indices.Rank > 0); + switch (indices.Rank) + { + case 1: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is TElement?[]); + Unsafe.As(collection)[indices.One] = value; + break; + case 2: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is TElement?[,]); + Unsafe.As(collection)[indices.Many![0], indices.Many![1]] = value; + break; + default: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. 
+ Debug.Assert(collection is Array); + Unsafe.As(collection).SetValue(value, indices.Many!); + break; + } + } + + object IElementOperations.CreateCollection(ReadOnlySpan lengths) + => lengths.Length switch + { + 0 => Array.Empty(), + 1 => new TElement?[lengths[0]], + 2 => new TElement?[lengths[0], lengths[1]], + 3 => new TElement?[lengths[0], lengths[1], lengths[2]], + 4 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3]], + 5 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4]], + 6 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5]], + 7 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6]], + 8 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]], + _ => throw new InvalidOperationException("Postgres arrays can have at most 8 dimensions.") + }; + + int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) + => ArrayConverterCore.GetArrayLengths((Array)collection, out lengths); + + Size? IElementOperations.IsDbNullOrGetSize(SizeContext context, object collection, IterationIndices indices, ref object? writeState) + => _elemConverter.IsDbNullOrGetSize(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); + + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) + return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); + + SetValue(collection, indices, isDbNull ? 
default : _elemConverter.Read(reader)); + return new(); + } + + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) + return ArrayConverterCore.AwaitTask(task, new(this, &SetResult), collection, indices); + + SetValue(collection, indices, result); + return new(); + + static void SetResult(Task task, object collection, IterationIndices indices) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + SetValue(collection, indices, new ValueTask(task: Unsafe.As>(task)).Result); + } + } + + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + if (async) + return _elemConverter.WriteAsync(writer, GetValue(collection, indices)!, cancellationToken); + + _elemConverter.Write(writer, GetValue(collection, indices)!); + return new(); + } + } + + sealed class ListBased(PgConcreteTypeInfo elementTypeInfo, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: 1, elementTypeInfo, pgLowerBound), IElementOperations + { + readonly PgConverter _elemConverter = (PgConverter)elementTypeInfo.Converter; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static TElement? GetValue(object collection, int index) + { + // Justification: avoid the cast overhead for per element calls. + Debug.Assert(collection is IList); + return Unsafe.As>(collection)[index]; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void SetValue(object collection, int index, TElement? value) + { + // Justification: avoid the cast overhead for per element calls. 
+ Debug.Assert(collection is IList); + var list = Unsafe.As>(collection); + list.Insert(index, value); + } + + object IElementOperations.CreateCollection(ReadOnlySpan lengths) + => new List(lengths.Length is 0 ? 0 : lengths[0]); + + int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) + { + lengths = null; + return ((IList)collection).Count; + } + + Size? IElementOperations.IsDbNullOrGetSize(SizeContext context, object collection, IterationIndices indices, ref object? writeState) + => _elemConverter.IsDbNullOrGetSize(context.Format, context.BufferRequirement, GetValue(collection, indices.One), ref writeState); + + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Rank is 1); + if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) + return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); + + SetValue(collection, indices.One, isDbNull ? default : _elemConverter.Read(reader)); + return new(); + } + + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Rank is 1); + if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) + return ArrayConverterCore.AwaitTask(task, new(this, &SetResult), collection, indices); + + SetValue(collection, indices.One, result); + return new(); + + static void SetResult(Task task, object collection, IterationIndices indices) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
+ SetValue(collection, indices.One, new ValueTask(task: Unsafe.As>(task)).Result); + } + } + + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Rank is 1); + if (async) + return _elemConverter.WriteAsync(writer, GetValue(collection, indices.One)!, cancellationToken); + + _elemConverter.Write(writer, GetValue(collection, indices.One)!); + return new(); + } + } +} + +sealed class ArrayTypeInfoProvider(PgProviderTypeInfo elementTypeInfo, Type requestedMappingType) + : PgComposingTypeInfoProvider(elementTypeInfo.PgTypeId is { } id ? elementTypeInfo.Options.GetArrayTypeId(id) : null, + elementTypeInfo) + where T : notnull +{ + PgSerializerOptions Options => EffectiveTypeInfo.Options; + + protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => Options.GetArrayElementTypeId(pgTypeId); + protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => Options.GetArrayTypeId(effectivePgTypeId); + + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType) + { + if (typeof(T) == typeof(Array) || typeof(T).IsArray) + { + requestedType = requestedMappingType; + return ArrayConverter.CreateArrayBased(effectiveConcreteTypeInfo, requestedType); + } + + if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(IList<>)) + { + requestedType = requestedMappingType; + return ArrayConverter.CreateListBased(effectiveConcreteTypeInfo); + } + + throw new NotSupportedException($"Unknown type T: {typeof(T).FullName}"); + } + + protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? values, ref object? writeState) + { + PgConcreteTypeInfo? concreteTypeInfo = null; + PgArrayMetadata metadata; + ArrayPool<(Size, object?)>? elemDataArrayPool = null; + (Size, object? WriteState)[]? 
elemData = null; + + var index = 0; + switch (values) + { + case TElement[] array: + metadata = PgArrayMetadata.Create(ArrayConverterCore.GetArrayLengths(array, out _), null); + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); + if (state is not null && elemData is null) + { + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); + } + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) + { + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. All elements must resolve to the same type info."); + } + + index++; + } + + break; + case List list: + metadata = PgArrayMetadata.Create(list.Count, null); + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); + if (state is not null && elemData is null) + { + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); + } + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) + { + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. 
All elements must resolve to the same type info."); + } + + index++; + } + + break; + case IList list: + metadata = PgArrayMetadata.Create(list.Count, null); + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); + if (state is not null && elemData is null) + { + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); + } + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) + { + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. All elements must resolve to the same type info."); + } + + index++; + } + + break; + case Array array: + metadata = PgArrayMetadata.Create(ArrayConverterCore.GetArrayLengths(array, out var dimensionLengths), dimensionLengths); + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); + if (state is not null && elemData is null) + { + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); + } + + // Always assign when elemData is allocated to avoid stale pooled array entries. 
+ if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) + { + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. All elements must resolve to the same type info."); + } + + index++; + } + + break; + case null: + return null; + default: + throw new NotSupportedException(); + } + + if (elemData is not null) + { + writeState = new ArrayConverterWriteState + { + Metadata = metadata, + IterationIndices = metadata.CreateIndices(), + ArrayPool = elemDataArrayPool, + Data = new(elemData, 0, index), + AnyWriteState = true + }; + } + + return concreteTypeInfo; + } +} + +// T is Array as we only know what type it will be after reading 'contains nulls'. +sealed class PolymorphicArrayConverter( + PgConverter structElementCollectionConverter, + PgConverter nullableElementCollectionConverter) + : PgStreamingConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(read: Size.CreateUpperBound(sizeof(int) + sizeof(int)), write: Size.Unknown); + return format is DataFormat.Binary; + } + + public override TBase Read(PgReader reader) + { + _ = reader.ReadInt32(); + var containsNulls = reader.ReadInt32() is 1; + reader.Rewind(sizeof(int) + sizeof(int)); + return containsNulls + ? nullableElementCollectionConverter.Read(reader) + : structElementCollectionConverter.Read(reader); + } + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + _ = reader.ReadInt32(); + var containsNulls = reader.ReadInt32() is 1; + reader.Rewind(sizeof(int) + sizeof(int)); + return containsNulls + ? 
nullableElementCollectionConverter.ReadAsync(reader, cancellationToken) + : structElementCollectionConverter.ReadAsync(reader, cancellationToken); + } + + public override Size GetSize(SizeContext context, TBase value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported"); + + public override void Write(PgWriter writer, TBase value) + => throw new NotSupportedException("Polymorphic writing is not supported"); + + public override ValueTask WriteAsync(PgWriter writer, TBase value, CancellationToken cancellationToken = default) + => throw new NotSupportedException("Polymorphic writing is not supported"); +} + +sealed class PolymorphicArrayTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgProviderTypeInfo _effectiveTypeInfo; + readonly PgProviderTypeInfo _effectiveNullableTypeInfo; + readonly ConcurrentDictionary _concreteInfoCache = new(ReferenceEqualityComparer.Instance); + + public PolymorphicArrayTypeInfoProvider(PgProviderTypeInfo effectiveTypeInfo, PgProviderTypeInfo effectiveNullableTypeInfo) + { + if (effectiveTypeInfo.PgTypeId is null || effectiveNullableTypeInfo.PgTypeId is null) + throw new ArgumentException("Type info cannot have an undecided PgTypeId.", + effectiveTypeInfo.PgTypeId is null ? nameof(effectiveTypeInfo) : nameof(effectiveNullableTypeInfo)); + + _effectiveTypeInfo = effectiveTypeInfo; + _effectiveNullableTypeInfo = effectiveNullableTypeInfo; + } + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + => GetOrAdd(_effectiveTypeInfo.GetDefault(pgTypeId), _effectiveNullableTypeInfo.GetDefault(pgTypeId)); + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, TBase? value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported."); + + protected override PgConcreteTypeInfo? 
GetForFieldCore(Field field) + { + var concreteTypeInfo = _effectiveTypeInfo.GetForField(field); + var concreteNullableTypeInfo = _effectiveNullableTypeInfo.GetForField(field); + + return concreteTypeInfo is not null && concreteNullableTypeInfo is not null + ? GetOrAdd(concreteTypeInfo, concreteNullableTypeInfo) + : null; + } + + PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo concreteTypeInfo, PgConcreteTypeInfo concreteNullableTypeInfo) + { + (PgConcreteTypeInfo ConcreteInfo, PgConcreteTypeInfo ConcreteNullableInfo) state = (concreteTypeInfo, concreteNullableTypeInfo); + return _concreteInfoCache.GetOrAdd(concreteTypeInfo, + static (_, state) => + new(state.ConcreteInfo.Options, + new PolymorphicArrayConverter((PgConverter)state.ConcreteInfo.Converter, (PgConverter)state.ConcreteNullableInfo.Converter), + state.ConcreteInfo.PgTypeId) { SupportsWriting = false }, + state); + } +} diff --git a/src/Npgsql/Internal/Converters/ArrayConverterCore.cs b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs new file mode 100644 index 0000000000..82e7550323 --- /dev/null +++ b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs @@ -0,0 +1,398 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using Npgsql.Util; + +namespace Npgsql.Internal.Converters; + +interface IElementOperations +{ + object CreateCollection(ReadOnlySpan lengths); + int GetCollectionCount(object collection, out int[]? lengths); + Size? IsDbNullOrGetSize(SizeContext context, object collection, IterationIndices indices, ref object? 
writeState); + ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken = default); + ValueTask Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken = default); +} + +readonly struct ArrayConverterCore( + IElementOperations elemOps, + PgTypeInfo elementTypeInfo, + bool elemTypeDbNullable, + int? expectedDimensions, + BufferRequirements binaryRequirements, + PgTypeId elemTypeId, + int pgLowerBound = 1) +{ + // Exposed for testing + internal const string ReadNonNullableCollectionWithNullsExceptionMessage = + "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; + + PgTypeInfo ElementTypeInfo { get; } = elementTypeInfo; + bool ElemTypeDbNullable { get; } = elemTypeDbNullable; + + bool IsDbNull(object values, IterationIndices arrayIndices, object? writeState) + { + // This call will only skip GetSize if we are dealing with fixed size elements, otherwise we'll repeat sizing costs. + // Fixed-size element converters cannot produce per-value write state, so IsDbNullOrGetSize must + // leave writeState alone — any mutation is a contract violation in the element converter. + Debug.Assert(binaryRequirements.Write.Kind is SizeKind.Exact); + var originalWriteState = writeState; + var isDbNull = elemOps.IsDbNullOrGetSize(new(DataFormat.Binary, binaryRequirements.Write), values, arrayIndices, ref writeState) is null; + Debug.Assert(ReferenceEquals(writeState, originalWriteState), "Fixed-size element converter mutated writeState during a null probe."); + return isDbNull; + } + + // Sizes a single element, accumulates into running size/anyWriteState, and returns the per-slot Size (-1 sentinel for NULL). 
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + Size SizeElement(SizeContext context, object values, IterationIndices indices, ref object? elemState, ref Size size, ref bool anyWriteState) + { + var elemSize = elemOps.IsDbNullOrGetSize(context, values, indices, ref elemState); + anyWriteState = anyWriteState || elemState is not null; + size = size.Combine(elemSize ?? 0); + return elemSize ?? -1; + } + + public Size GetSize(SizeContext context, object values, ref object? writeState) + { + Debug.Assert(context.Format is DataFormat.Binary); + + // Try to extract state from the provider phase (if anything). Provider-level state is consumed once per binding, + // so we don't need to check for or clean up leftover iteration state — there's no path that produces it. + var providerState = writeState as ArrayConverterWriteState; + + var metadata = providerState?.Metadata ?? PgArrayMetadata.Create(elemOps.GetCollectionCount(values, out var lengths), lengths); + if (metadata.TotalElements is 0) + { + // The provider phase doesn't construct write state when there are no elements to populate, so any state + // reaching this branch is stale from a prior binding and would otherwise leak through to Write as garbage. + if (writeState is not null) + ThrowHelper.ThrowArgumentException("Write state should be null for empty arrays.", nameof(writeState)); + return metadata.BinaryPreambleByteCount; + } + + var size = Size.Create(metadata.BinaryPreambleByteCount + sizeof(int) * metadata.TotalElements); + var indices = providerState?.IterationIndices ?? metadata.CreateIndices(); + var anyWriteState = providerState?.AnyWriteState ?? 
false; + var arrayPool = providerState?.ArrayPool; + var elemData = providerState?.Data.Array; + var fixedSizeElements = false; + if (binaryRequirements.Write is { Kind: SizeKind.Exact, Value: var elemByteCount }) + { + fixedSizeElements = true; + var nulls = 0; + var lastLength = metadata.LastDimension; + if (ElemTypeDbNullable) + { + do + { + if (IsDbNull(values, indices, elemData?[indices.IndicesSum].WriteState)) + nulls++; + } + while (indices.TryAdvance(lastLength, metadata.DimensionLengths)); + } + + size = size.Combine((metadata.TotalElements - nulls) * elemByteCount); + } + else + { + var lastCount = metadata.LastDimension; + if (elemData is null) + { + arrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = arrayPool.Rent(metadata.TotalElements); + // Own-rent: pool buffers may contain stale WriteState references, so start each state at null. + do + { + object? elemState = null; + var elemSize = SizeElement(context, values, indices, ref elemState, ref size, ref anyWriteState); + elemData[indices.IndicesSum] = (elemSize, elemState); + } + while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); + } + else + { + // Provider-supplied elemData already has valid per-element WriteState, observe and extend it through the ref. + do + { + ref var elem = ref elemData[indices.IndicesSum]; + elem.Size = SizeElement(context, values, indices, ref elem.WriteState, ref size, ref anyWriteState); + } + while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); + } + } + + var result = providerState ?? 
new() + { + Metadata = metadata, + IterationIndices = indices + }; + if (elemData is not null) + { + result.ArrayPool = arrayPool; + result.Data = new(elemData, 0, metadata.TotalElements); + result.AnyWriteState = anyWriteState; + } + result.FixedSizeElements = fixedSizeElements; + writeState = result; + return size; + } + + public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken = default) + { + Debug.Assert(reader.Current.Format is DataFormat.Binary); + if (reader.ShouldBuffer(sizeof(int) + sizeof(int) + sizeof(uint))) + await reader.Buffer(async, sizeof(int) + sizeof(int) + sizeof(uint), cancellationToken).ConfigureAwait(false); + + var dimensions = reader.ReadInt32(); + + var flags = (PgArrayMetadata.Flags)reader.ReadInt32(); + _ = reader.ReadUInt32(); // Element OID. + + if (!ElemTypeDbNullable && flags.HasFlag(PgArrayMetadata.Flags.ContainsNulls)) + ThrowHelper.ThrowInvalidCastException(ReadNonNullableCollectionWithNullsExceptionMessage); + + // Make sure we can read length + lower bound N dimension times. + if (reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)) + await reader.Buffer(async, (sizeof(int) + sizeof(int)) * dimensions, cancellationToken).ConfigureAwait(false); + + Debug.Assert(!reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)); + + int[]? dimensionLengths = null; + var lastDimension = 0; + scoped Span dimensionLengthsSpan; + switch (dimensions) + { + case 0: + // At 0, if we have expected dimensions create the collection as such, works around https://github.com/npgsql/npgsql/issues/1271. + switch (expectedDimensions) + { + case null or <= 1: + dimensionLengthsSpan = Span.Empty; + break; + case { } value: + dimensionLengthsSpan = stackalloc int[value]; + dimensionLengthsSpan.Clear(); + break; + } + break; + case 1: + lastDimension = reader.ReadInt32(); + _ = reader.ReadInt32(); // Lower bound + dimensionLengthsSpan = lastDimension is 0 ? 
Span.Empty : new(ref lastDimension); + break; + default: + dimensionLengths = new int[dimensions]; + for (var i = 0; i < dimensions; i++) + { + lastDimension = reader.ReadInt32(); + _ = reader.ReadInt32(); // Lower bound + dimensionLengths[i] = lastDimension; + } + dimensionLengthsSpan = dimensionLengths.AsSpan(); + break; + } + + var collection = elemOps.CreateCollection(dimensionLengthsSpan); + if (dimensions is 0 || lastDimension is 0) + return collection; + + if (expectedDimensions is not null && dimensions != expectedDimensions) + ThrowHelper.ThrowInvalidCastException( + $"Cannot read an array value with {dimensions} dimension{(dimensions == 1 ? "" : "s")} into a " + + $"collection type with {expectedDimensions} dimension{(expectedDimensions == 1 ? "" : "s")}. " + + $"Call GetValue or a version of GetFieldValue with the commas matching the expected amount of dimensions."); + + var indices = IterationIndices.Create(dimensions); + do + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var length = reader.ReadInt32(); + if (length is not -1) + { + var scope = await reader.BeginNestedRead(async, length, binaryRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + await elemOps.Read(async, reader, isDbNull: false, collection, indices, cancellationToken).ConfigureAwait(false); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + else + await elemOps.Read(async, reader, isDbNull: true, collection, indices, cancellationToken).ConfigureAwait(false); + } + while (indices.TryAdvance(lastDimension, dimensionLengths)); + + return collection; + } + + public async ValueTask Write(bool async, PgWriter writer, object values, CancellationToken cancellationToken) + { + Debug.Assert(writer.Current.Format is DataFormat.Binary); + var (metadata, state) = writer.Current.WriteState switch + { + ArrayConverterWriteState 
writeState => (writeState.Metadata, writeState), + null => (PgArrayMetadata.Create(0, null), null), + _ => throw new InvalidCastException($"Invalid write state, expected {typeof(ArrayConverterWriteState).FullName}.") + }; + + if (writer.ShouldFlush(metadata.BinaryPreambleByteCount)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(metadata.Dimensions); // Dimensions + writer.WriteInt32(0); // Flags (not really used) + writer.WriteAsOid(elemTypeId); + for (var dim = 0; dim < metadata.Dimensions; dim++) + { + writer.WriteInt32(metadata.DimensionLengths[dim]); + writer.WriteInt32(pgLowerBound); // Lower bound + } + + // We can stop here for empty collections. + if (state is null) + return; + + var elemData = state.Data.Array; + var indices = state.IterationIndices; + indices.Reset(); + var lastCount = metadata.LastDimension; + var offset = state.Data.Offset; + var fixedSizeElements = state.FixedSizeElements; + do + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var elem = elemData?[offset + indices.IndicesSum] ?? default; + var length = fixedSizeElements + ? ElemTypeDbNullable && IsDbNull(values, indices, elem.WriteState) ? -1 : binaryRequirements.Write.Value + : elem.Size.Value; + + writer.WriteInt32(length); + if (length is not -1) + { + using var _ = await writer.BeginNestedWrite(async, binaryRequirements.Write, + length, elem.WriteState, cancellationToken).ConfigureAwait(false); + await elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); + } + } + while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); + } + + public static int GetArrayLengths(Array array, out int[]? 
dimensionLengths) + { + var dimensions = array.Rank; + + if (dimensions is 1) + { + dimensionLengths = null; + return array.Length; + } + + dimensionLengths = new int[dimensions]; + for (var i = 0; i < dimensionLengths.Length; i++) + dimensionLengths[i] = array.GetLength(i); + + // If we have a multidim array it may throw an overflow exception for large arrays (LongLength exists for these cases) + // however anything over int.MaxValue wouldn't fit in a parameter anyway so easier to throw here than deal with a long. + return array.Length; + } + + // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. + // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. + // The alternatives are: + // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). + // 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + public static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, IterationIndices indices) + { + await task.ConfigureAwait(false); + continuation.Invoke(task, collection, indices); + // Guarantee the type stays loaded until the function pointer call is done. + GC.KeepAlive(continuation.Handle); + } + + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. + public readonly unsafe struct Continuation + { + public object Handle { get; } + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. 
+ /// The continuation + public Continuation(object handle, delegate* continuation) + { + Handle = handle; + _continuation = continuation; + } + + public void Invoke(Task task, object collection, IterationIndices indices) => _continuation(task, collection, indices); + } +} + +sealed class ArrayConverterWriteState : MultiWriteState +{ + public required PgArrayMetadata Metadata { get; init; } + public required IterationIndices IterationIndices { get; init; } + + /// When true, all non-null elements have a fixed binary size and Data is not populated with per-element sizes. + public bool FixedSizeElements { get; set; } +} + +readonly struct PgArrayMetadata +{ + const int MaxDimensions = 8; + + readonly int _totalElements; + readonly int[]? _dimensionLengths; + + PgArrayMetadata(int totalElements, int[]? dimensionLengths) + { + _totalElements = totalElements; + _dimensionLengths = dimensionLengths; + } + + public int TotalElements => _totalElements; + public int LastDimension => _dimensionLengths is null ? _totalElements : _dimensionLengths[^1]; + [UnscopedRef] + public ReadOnlySpan DimensionLengths + => _dimensionLengths is null ? new ReadOnlySpan(in _totalElements) : _dimensionLengths.AsSpan(); + public int Dimensions => _dimensionLengths?.Length ?? (_totalElements is 0 ? 0 : 1); + + public int BinaryPreambleByteCount => GetBinaryPreambleByteCount(TotalElements, Dimensions); + + public IterationIndices CreateIndices() => IterationIndices.Create(Dimensions); + + static int GetBinaryPreambleByteCount(int totalElements, int dimensions) + => sizeof(int) + // Dimensions + sizeof(int) + // Flags + sizeof(uint) + // Element OID + (totalElements is 0 ? 0 : dimensions * (sizeof(int) + sizeof(int))); // Dimensions * (array length and lower bound) + + public static PgArrayMetadata Create(long totalElements, int[]? 
dimensionLengths) + { + if (totalElements > int.MaxValue) + ThrowHelper.ThrowArgumentException("Postgres arrays cannot have more than int.MaxValue elements.", nameof(totalElements)); + + if (dimensionLengths?.Length is < 0 or > MaxDimensions) + ThrowHelper.ThrowArgumentException($"Postgres arrays can have at most {MaxDimensions} dimensions.", nameof(dimensionLengths)); + + return new((int)totalElements, dimensionLengths); + } + + public enum Flags + { + ContainsNulls = 1 + } +} diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs new file mode 100644 index 0000000000..bf85a06a9f --- /dev/null +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -0,0 +1,143 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +static class AsyncHelpers +{ + public static void OnCompletedWithSource(Task task, CompletionSource source, CompletionSourceContinuation continuation) + { + _ = Core(task, source, continuation); + + // Have our state machine be pooled, but don't return the task, source.Task should be used instead. + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + async ValueTask Core(Task task, CompletionSource source, CompletionSourceContinuation continuation) + { + try + { + await task.ConfigureAwait(false); + continuation.Invoke(task, source); + } + catch (Exception ex) + { + source.SetException(ex); + } + // Guarantee the type stays loaded until the function pointer call is done. 
+ continuation.KeepAlive(); + } + } + + public abstract class CompletionSource + { + public abstract void SetException(Exception exception); + } + + public sealed class CompletionSource : CompletionSource + { + AsyncValueTaskMethodBuilder _amb; + + public ValueTask Task { get; } + + public CompletionSource() + { + _amb = AsyncValueTaskMethodBuilder.Create(); + // AsyncValueTaskMethodBuilder's Task and SetResult aren't thread safe in regard to each other + // Which is why we access it prematurely + Task = _amb.Task; + } + + public void SetResult(T value) + => _amb.SetResult(value); + + public override void SetException(Exception exception) + => _amb.SetException(exception); + } + + public sealed class PoolingCompletionSource : CompletionSource + { + PoolingAsyncValueTaskMethodBuilder _amb; + + public ValueTask Task { get; } + + public PoolingCompletionSource() + { + _amb = PoolingAsyncValueTaskMethodBuilder.Create(); + // PoolingAsyncValueTaskMethodBuilder's Task and SetResult aren't thread safe in regard to each other + // Which is why we access it prematurely + Task = _amb.Task; + } + + public void SetResult(T value) + => _amb.SetResult(value); + + public override void SetException(Exception exception) + => _amb.SetException(exception); + } + + // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. + // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. + public readonly unsafe struct CompletionSourceContinuation + { + readonly object _handle; + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. 
+ /// The continuation + public CompletionSourceContinuation(object handle, delegate* continuation) + { + _handle = handle; + _continuation = continuation; + } + + public void KeepAlive() => GC.KeepAlive(_handle); + + public void Invoke(Task task, CompletionSource tcs) => _continuation(task, tcs); + } + + public static unsafe ValueTask ReadAsyncAsNullable(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + where T : struct + { + // Cheap if we have all the data. + var task = effectiveConverter.ReadAsync(reader, cancellationToken); + if (task.IsCompletedSuccessfully) + return new(new T?(task.Result)); + + // Otherwise we do one additional allocation, this allows us to share state machine codegen for all Ts. + var source = new PoolingCompletionSource(); + OnCompletedWithSource(task.AsTask(), source, new(instance, &UnboxAndComplete)); + return source.Task; + + static void UnboxAndComplete(Task task, CompletionSource completionSource) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + Debug.Assert(completionSource is PoolingCompletionSource); + Unsafe.As>(completionSource).SetResult(new T?(new ValueTask(Unsafe.As>(task)).Result)); + } + } + + public static unsafe ValueTask ReadAsObjectAsyncAsT(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + { + // Cheap if we have all the data. + var task = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); + if (task.IsCompletedSuccessfully) + return new((T)task.Result); + + // Otherwise we do one additional allocation, this allows us to share state machine codegen for all Ts. 
+ var source = new PoolingCompletionSource(); + OnCompletedWithSource(task.AsTask(), source, new(instance, &UnboxAndComplete)); + return source.Task; + + static void UnboxAndComplete(Task task, CompletionSource completionSource) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + Debug.Assert(completionSource is PoolingCompletionSource); + Unsafe.As>(completionSource).SetResult((T)new ValueTask(Unsafe.As>(task)).Result); + } + } +} diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs new file mode 100644 index 0000000000..f7cee926bb --- /dev/null +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -0,0 +1,247 @@ +using System; +using System.Buffers; +using System.Collections; +using System.Collections.Specialized; +using System.Diagnostics; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using static Npgsql.Internal.Converters.BitStringHelpers; + +namespace Npgsql.Internal.Converters; + +file static class BitStringHelpers +{ + public static int GetByteCountFromBitCount(int n) + { + const int BitShiftPerByte = 3; + Debug.Assert(n >= 0); + // Due to sign extension, we don't need to special case for n == 0, since ((n - 1) >> 3) + 1 = 0 + // This doesn't hold true for ((n - 1) / 8) + 1, which equals 1. 
+ return (n - 1 + (1 << BitShiftPerByte)) >>> BitShiftPerByte; + } +} + +sealed class BitArrayBitStringConverter : PgStreamingConverter +{ + public override BitArray Read(PgReader reader) + { + if (reader.ShouldBuffer(sizeof(int))) + reader.Buffer(sizeof(int)); + + var bits = reader.ReadInt32(); + var bytes = new byte[GetByteCountFromBitCount(bits)]; + reader.ReadBytes(bytes); + return ReadValue(bytes, bits); + } + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.BufferAsync(sizeof(int), cancellationToken).ConfigureAwait(false); + + var bits = reader.ReadInt32(); + var bytes = new byte[GetByteCountFromBitCount(bits)]; + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + return ReadValue(bytes, bits); + } + + internal static BitArray ReadValue(byte[] bytes, int bits) + { + for (var i = 0; i < bytes.Length; i++) + { + ref var b = ref bytes[i]; + b = ReverseBits(b); + } + + return new(bytes) { Length = bits }; + + // https://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64Bits + static byte ReverseBits(byte b) => (byte)(((b * 0x80200802UL) & 0x0884422110UL) * 0x0101010101UL >> 32); + } + + public override Size GetSize(SizeContext context, BitArray value, ref object? 
writeState) + => sizeof(int) + GetByteCountFromBitCount(value.Length); + + public override void Write(PgWriter writer, BitArray value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask WriteAsync(PgWriter writer, BitArray value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, BitArray value, CancellationToken cancellationToken = default) + { + var byteCount = writer.Current.Size.Value - sizeof(int); + var array = ArrayPool.Shared.Rent(byteCount); + for (var pos = 0; pos < byteCount; pos++) + { + var bitPos = pos*8; + var bits = Math.Min(8, value.Length - bitPos); + var b = 0; + for (var i = 0; i < bits; i++) + b += (value[bitPos + i] ? 1 : 0) << (8 - i - 1); + array[pos] = (byte)b; + } + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(value.Length); + if (async) + await writer.WriteBytesAsync(new ReadOnlyMemory(array, 0, byteCount), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(new ReadOnlySpan(array, 0, byteCount)); + + ArrayPool.Shared.Return(array); + } +} + +sealed class BitVector32BitStringConverter : PgBufferedConverter +{ + static int MaxSize => sizeof(int) + sizeof(int); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(read: Size.CreateUpperBound(MaxSize), write: MaxSize); + return format is DataFormat.Binary; + } + + protected override BitVector32 ReadCore(PgReader reader) + { + if (reader.CurrentRemaining > sizeof(int) + sizeof(int)) + throw new InvalidCastException("Can't read a BIT(N) with more than 32 bits to BitVector32, only up to BIT(32)."); + + var bits = reader.ReadInt32(); + return GetByteCountFromBitCount(bits) switch + { + 4 => new(reader.ReadInt32()), + 3 => 
new((reader.ReadInt16() << 8) + reader.ReadByte()), + 2 => new(reader.ReadInt16() << 16), + 1 => new(reader.ReadByte() << 24), + _ => new(0) + }; + } + + protected override void WriteCore(PgWriter writer, BitVector32 value) + { + writer.WriteInt32(32); + writer.WriteInt32(value.Data); + } +} + +sealed class BoolBitStringConverter : PgBufferedConverter +{ + static int MaxSize => sizeof(int) + sizeof(byte); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(read: Size.CreateUpperBound(MaxSize), write: MaxSize); + return format is DataFormat.Binary; + } + + protected override bool ReadCore(PgReader reader) + { + var bits = reader.ReadInt32(); + return bits switch + { + > 1 => throw new InvalidCastException("Can't read a BIT(N) type to bool, only BIT(1)."), + // We make an accommodation for varbit with no data. + 0 => false, + _ => (reader.ReadByte() & 128) is not 0 + }; + } + + public override Size GetSize(SizeContext context, bool value, ref object? writeState) => MaxSize; + protected override void WriteCore(PgWriter writer, bool value) + { + writer.WriteInt32(1); + writer.WriteByte(value ? 
(byte)128 : (byte)0); + } +} + +sealed class StringBitStringConverter : PgStreamingConverter +{ + public override string Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var bits = reader.ReadInt32(); + var bytes = new byte[GetByteCountFromBitCount(bits)]; + if (async) + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + else + reader.ReadBytes(bytes); + + var bitArray = BitArrayBitStringConverter.ReadValue(bytes, bits); + var sb = new StringBuilder(bits); + for (var i = 0; i < bitArray.Count; i++) + sb.Append(bitArray[i] ? '1' : '0'); + + return sb.ToString(); + } + + public override Size GetSize(SizeContext context, string value, ref object? 
writeState) + { + if (value.AsSpan().IndexOfAnyExcept('0', '1') is not -1 and var index) + throw new ArgumentException($"Invalid bitstring character '{value[index]}' at index: {index}", nameof(value)); + + return sizeof(int) + GetByteCountFromBitCount(value.Length); + } + + public override void Write(PgWriter writer, string value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask WriteAsync(PgWriter writer, string value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, string value, CancellationToken cancellationToken) + { + var byteCount = writer.Current.Size.Value - sizeof(int); + var array = ArrayPool.Shared.Rent(byteCount); + for (var pos = 0; pos < byteCount; pos++) + { + var bitPos = pos*8; + var bits = Math.Min(8, value.Length - bitPos); + var b = 0; + for (var i = 0; i < bits; i++) + b += (value[bitPos + i] == '1' ? 1 : 0) << (8 - i - 1); + array[pos] = (byte)b; + } + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(value.Length); + if (async) + await writer.WriteBytesAsync(new ReadOnlyMemory(array, 0, byteCount), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(new ReadOnlySpan(array, 0, byteCount)); + + ArrayPool.Shared.Return(array); + } +} + +/// For BIT(1) columns specifically (read from a field with TypeModifier == 1), this provider returns a bool converter +/// to align with SqlClient (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). +/// Otherwise we return a BitArray converter. Polymorphic writing through this provider is not supported. 
+sealed class PolymorphicBitStringTypeInfoProvider(PgSerializerOptions options, PgTypeId bitString) : PgConcreteTypeInfoProvider +{ + readonly PgConcreteTypeInfo _boolConcreteTypeInfo = new(options, new BoolBitStringConverter(), bitString) { SupportsWriting = false }; + readonly PgConcreteTypeInfo _bitArrayConcreteTypeInfo = new(options, new BitArrayBitStringConverter(), bitString) { SupportsWriting = false }; + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + => GetConcreteInfo(field: null); + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported."); + + protected override PgConcreteTypeInfo GetForFieldCore(Field field) + => GetConcreteInfo(field); + + PgConcreteTypeInfo GetConcreteInfo(Field? field) + => field?.TypeModifier is 1 ? _boolConcreteTypeInfo : _bitArrayConcreteTypeInfo; +} diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs new file mode 100644 index 0000000000..1d7e143616 --- /dev/null +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -0,0 +1,97 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using Npgsql.Util; + +namespace Npgsql.Internal.Converters; + +/// A converter that adapts a boxed converter's results to an exact-type converter over T, wrapping the read/write +/// paths through object to present a typed surface for a converter whose TypeToConvert is only a base of T. 
+[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public sealed class CastingConverter : PgConverter +{ + readonly PgConverter _effectiveConverter; + + public CastingConverter(PgConverter effectiveConverter) : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) + { + if (!typeof(T).IsInSubtypeRelationshipWith(effectiveConverter.TypeToConvert)) + throw new ArgumentException( + $"Values for the effective converter's type {effectiveConverter.TypeToConvert} cannot be cast to the type {typeof(T)} for this converter.", + nameof(effectiveConverter)); + + _effectiveConverter = effectiveConverter; + } + + protected override bool IsDbNullValue(T? value, object? writeState) => _effectiveConverter.IsDbNullAsObject(value, writeState); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => _effectiveConverter.CanConvert(format, out bufferRequirements); + + public override T Read(PgReader reader) => (T)_effectiveConverter.ReadAsObject(reader); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => this.ReadAsObjectAsyncAsT(_effectiveConverter, reader, cancellationToken); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => _effectiveConverter.GetSizeAsObject(context, value!, ref writeState); + + public override void Write(PgWriter writer, T value) + => _effectiveConverter.WriteAsObject(writer, value!); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => _effectiveConverter.WriteAsObjectAsync(writer, value!, cancellationToken); + + internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) + => async + ? 
_effectiveConverter.ReadAsObjectAsync(reader, cancellationToken) + : new(_effectiveConverter.ReadAsObject(reader)); + + internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + // Cast here to keep our T contract, and otherwise return more accurate invalid cast exceptions (as the effective converter will cast as well). + if (async) + return _effectiveConverter.WriteAsObjectAsync(writer, (T)value, cancellationToken); + + _effectiveConverter.WriteAsObject(writer, (T)value); + return new(); + } +} + +// Given there aren't many instantiations of providers (and it's fairly involved to write a fast one) we use the composing base class. +sealed class CastingTypeInfoProvider(PgProviderTypeInfo effectiveProviderTypeInfo) + : PgComposingTypeInfoProvider(effectiveProviderTypeInfo.PgTypeId, effectiveProviderTypeInfo) +{ + protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; + protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; + + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType) + { + requestedType = null; + return new CastingConverter(effectiveConcreteTypeInfo.Converter); + } + + protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? 
writeState) + => EffectiveTypeInfo.GetForValueAsObject(effectiveContext, value, out writeState); +} + +static class CastingTypeInfoExtensions +{ + [RequiresDynamicCode("Producing an exact-type info from one without an exact type can require creating new generic types or methods at runtime, which may not work when AOT compiling.")] + internal static PgTypeInfo ToExactTypeInfo(this PgTypeInfo typeInfo) + { + if (typeInfo.HasExactType) + return typeInfo; + + var type = typeInfo.Type; + if (typeInfo is PgProviderTypeInfo providerTypeInfo) + return new PgProviderTypeInfo(typeInfo.Options, + (PgConcreteTypeInfoProvider)Activator.CreateInstance(typeof(CastingTypeInfoProvider<>).MakeGenericType(type), + providerTypeInfo)!, typeInfo.PgTypeId); + + var concreteTypeInfo = (PgConcreteTypeInfo)typeInfo; + return new PgConcreteTypeInfo(typeInfo.Options, + (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(type), concreteTypeInfo.Converter)!, concreteTypeInfo.PgTypeId); + } +} diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs new file mode 100644 index 0000000000..59b9f75291 --- /dev/null +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -0,0 +1,325 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Composites; + +namespace Npgsql.Internal.Converters; + +sealed class CompositeConverter : PgStreamingConverter where T : notnull +{ + readonly CompositeInfo _composite; + readonly BufferRequirements _bufferRequirements; + // Precomputed write size from the constructor's combine pass, taken before the provider-field clamp + // and the upper-bound limit. When Exact, GetSize can return this directly without per-field sizing — + // the per-field loop still runs for bind-time resolution side-effects, but size is already known. 
+ readonly Size _writeSizePrecomputed; + + public CompositeConverter(CompositeInfo composite) + { + _composite = composite; + + var req = BufferRequirements.CreateFixedSize(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int))); + var anyProviderField = false; + foreach (var field in _composite.Fields) + { + anyProviderField = anyProviderField || field.IsProviderBacked; + + var readReq = field.BinaryReadRequirement; + var writeReq = field.BinaryWriteRequirement; + + // If field is nullable we cannot depend on its buffer size being fixed. + if (field.IsDbNullable) + { + readReq = readReq.Combine(Size.CreateUpperBound(0)); + writeReq = writeReq.Combine(Size.CreateUpperBound(0)); + } + + var readSuccess = req.Read.TryCombine(readReq, out readReq); + var writeSuccess = req.Write.TryCombine(writeReq, out writeReq); + // If we fail to combine due to overflow return unknown. + req = BufferRequirements.Create(readSuccess ? readReq : Size.Unknown, writeSuccess ? writeReq : Size.Unknown); + } + + // Capture the combined write size before clamping so GetSize can return it unchanged. This is the + // full requirement we know internally — externally we hide it behind an upper-bound to force GetSize + // to fire for provider-backed composites, but the number itself is still correct. + _writeSizePrecomputed = req.Write; + + // When any field defers resolution to a provider, downgrade the externally-reported write size to + // an upper bound. This is the sole mechanism by which bind-time resolution is triggered: non-exact + // writes route through GetSize, where per-field GetWriteInfo calls dispatch into providers and + // surface deterministic value-level errors (e.g. DateTime kind mismatches) at bind instead of at + // first Write. Composites with only concrete fields stay exact and skip GetSize as before. 
+ if (anyProviderField && req.Write.Kind is SizeKind.Exact) + req = BufferRequirements.Create(req.Read, Size.CreateUpperBound(req.Write.Value)); + + // We have to put a limit on the requirements we report otherwise smaller buffer sizes won't work. + req = BufferRequirements.Create(Limit(req.Read), Limit(req.Write)); + + _bufferRequirements = req; + + // Return unknown if we hit the limit. + static Size Limit(Size requirement) + { + const int maxByteCount = 1024; + return requirement.GetValueOrDefault() > maxByteCount ? requirement.Combine(Size.Unknown) : requirement; + } + } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = _bufferRequirements; + return format is DataFormat.Binary; + } + + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + // TODO we can make a nice thread-static cache for this. + using var builder = new CompositeBuilder(_composite); + + var count = reader.ReadInt32(); + if (count != _composite.Fields.Count) + throw new InvalidOperationException("Cannot read composite type with mismatched number of fields."); + + foreach (var field in _composite.Fields) + { + if (reader.ShouldBuffer(sizeof(uint) + sizeof(int))) + await reader.Buffer(async, sizeof(uint) + sizeof(int), cancellationToken).ConfigureAwait(false); + + var oid = reader.ReadUInt32(); + var length = reader.ReadInt32(); + + // We're only requiring the PgTypeIds to be oids if this converter is actually used during execution. 
+ // As a result we can still introspect in the global mapper and create all the info with portable ids. + if(oid != field.PgTypeId.Oid) + // We could remove this requirement by storing a dictionary of CompositeInfos keyed by backend. + throw new InvalidCastException( + $"Cannot read oid {oid} into composite field {field.Name} with oid {field.PgTypeId}. " + + $"This could be caused by a DDL change after this DataSource loaded its types, or a difference between column order of table composites between backends, make sure these line up identically."); + + if (length is -1) + field.ReadDbNull(builder); + else + { + var converter = field.GetReadInfo(out var readRequirement); + var scope = await reader.BeginNestedRead(async, length, readRequirement, cancellationToken).ConfigureAwait(false); + try + { + await field.Read(async, converter, builder, reader, cancellationToken).ConfigureAwait(false); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + } + + return builder.Complete(); + } + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + { + var boxedInstance = (object)value; + + // When the combine pass produced an exact size, every field is individually fixed-size and + // non-nullable — the only reason we're in GetSize at all is that some field defers resolution + // to a provider and we clamped externally to force this entry. Walk fields purely for bind-time + // resolution side effects; the size is the precomputed one from the constructor. Rent lazily + // so the common DateTime-kind-style case (providers that validate but produce no state) pays + // no ElementState array allocation. + if (_writeSizePrecomputed.Kind is SizeKind.Exact) + { + ElementState[]? 
data = null; + for (var i = 0; i < _composite.Fields.Count; i++) + { + var field = _composite.Fields[i]; + var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement, out var fieldState); + + // Skip populating the slot when the provider produced no state and the resolved converter is the same as the default. + // The common case — DateTime-kind and similar pure-validation providers — satisfies both and pays no slot allocation. + // A provider that happens to return a non-default concrete for a decided id still has its + // converter captured so Write uses it instead of demoting silently to the default. + if (fieldState is null && ReferenceEquals(converter, field.GetDefaultWriteInfo(out _))) + continue; + + if (data is null) + { + data = ArrayPool.Shared.Rent(_composite.Fields.Count); + // clear any stale slots left behind by the previous pool user. + Array.Clear(data, 0, _composite.Fields.Count); + } + + data[i] = new() + { + Size = writeRequirement, + WriteState = fieldState, + Converter = converter, + BufferRequirement = writeRequirement + }; + } + + if (data is null) + { + writeState = null; + return _writeSizePrecomputed; + } + + writeState = new WriteState + { + ArrayPool = ArrayPool.Shared, + Data = new(data, 0, _composite.Fields.Count), + AnyWriteState = true, + BoxedInstance = boxedInstance, + }; + return _writeSizePrecomputed; + } + + // Variable-size or nullable fields — per-field IsDbNullOrGetSize is needed to compute the total, + // and per-field sizes must flow forward to Write. Always rent. 
+ var arrayPool = ArrayPool.Shared; + var slowData = arrayPool.Rent(_composite.Fields.Count); + var totalSize = Size.Create(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int))); + var anyWriteState = false; + for (var i = 0; i < _composite.Fields.Count; i++) + { + var field = _composite.Fields[i]; + var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement, out var fieldState); + var fieldSizeOrNull = field.IsDbNullOrGetSize(converter, context.Format, writeRequirement, boxedInstance, ref fieldState); + anyWriteState = anyWriteState || fieldState is not null; + slowData[i] = new() + { + Size = fieldSizeOrNull ?? -1, + WriteState = fieldState, + Converter = converter, + BufferRequirement = writeRequirement + }; + totalSize = totalSize.Combine(fieldSizeOrNull ?? 0); + } + + writeState = new WriteState + { + ArrayPool = arrayPool, + Data = new(slowData, 0, _composite.Fields.Count), + AnyWriteState = anyWriteState, + BoxedInstance = boxedInstance, + }; + return totalSize; + } + + public override void Write(PgWriter writer, T value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken) + { + // Null state is legitimate in two cases: + // 1. Exact-size composite — GetSize was skipped entirely. By construction of the combine pass + // this means no provider field, no variable field, no nullable field. + // 2. Clamped-by-provider composite — GetSize ran but every field's provider produced null + // state, so we skipped the WriteState allocation. 
All fields are individually fixed-size + // (that's what _writeSizePrecomputed.Kind is Exact guarantees), so it works the same + // way and resolution is just re-done via cached provider dispatch. + // Variable-size composites must always arrive with a populated WriteState, we can't recover + // per-field value-dependent sizes otherwise. + var writeState = writer.Current.WriteState switch + { + WriteState ws => ws, + null when _writeSizePrecomputed.Kind is SizeKind.Exact => null, + null => throw new InvalidOperationException("Composite Write requires per-field data from GetSize when any field is variable-size."), + _ => throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.") + }; + Debug.Assert(_bufferRequirements.Write.Kind is not SizeKind.Exact || writeState is null, + "Exact-size composite must not carry write state — GetSize should have been skipped."); + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(_composite.Fields.Count); + + var boxedInstance = writeState?.BoxedInstance ?? value; + var data = writeState?.Data.Array; + for (var i = 0; i < _composite.Fields.Count; i++) + { + if (writer.ShouldFlush(sizeof(uint) + sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var field = _composite.Fields[i]; + writer.WriteAsOid(field.PgTypeId); + + // No cached slot: uses GetDefaultWriteInfo which is stateless by construction, + // so there is nothing to dispose on this path. Per-value resolution, if it was needed, + // already ran at bind-time GetSize and would have populated the slot + // A slot with a null Converter is a default(ElementState) left behind by + // GetSize's lazy-rent: fields walked before the first state-producing provider aren't + // back-filled, and Write handles them per-slot the same way a fully-unallocated data + // array is handled in the truly-exact case. 
+ ElementState elementState; + if (data?[i] is { Converter: not null } state) + elementState = state; + else + { + var converter = field.GetDefaultWriteInfo(out var writeRequirement); + elementState = new() + { + Size = field.IsDbNull(converter, boxedInstance, writeState: null) ? -1 : writeRequirement, + WriteState = null, + Converter = converter, + BufferRequirement = writeRequirement, + }; + } + var length = elementState.Size.Value; + writer.WriteInt32(length); + if (length is not -1) + { + using var _ = await writer.BeginNestedWrite(async, elementState.BufferRequirement, length, elementState.WriteState, cancellationToken).ConfigureAwait(false); + await field.Write(async, elementState.Converter, writer, boxedInstance, cancellationToken).ConfigureAwait(false); + } + } + } + + readonly struct ElementState + { + public required Size Size { get; init; } + public required object? WriteState { get; init; } + public required PgConverter Converter { get; init; } + public required Size BufferRequirement { get; init; } + } + + class WriteState : IDisposable + { + public required ArrayPool? 
ArrayPool { get; init; } + public required ArraySegment Data { get; init; } + public required bool AnyWriteState { get; init; } + public required object BoxedInstance { get; init; } + + public void Dispose() + { + if (Data.Array is not { } array) + return; + + if (AnyWriteState) + for (var i = Data.Offset; i < Data.Offset + Data.Count; i++) + if (array[i].WriteState is IDisposable disposable) + disposable.Dispose(); + + Array.Clear(array, Data.Offset, Data.Count); + ArrayPool?.Return(array); + } + } +} diff --git a/src/Npgsql/Internal/Converters/EnumConverter.cs b/src/Npgsql/Internal/Converters/EnumConverter.cs new file mode 100644 index 0000000000..12f85992f0 --- /dev/null +++ b/src/Npgsql/Internal/Converters/EnumConverter.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Text; + +namespace Npgsql.Internal.Converters; + +[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] +sealed class EnumConverter : PgBufferedConverter where TEnum : struct, Enum +{ + readonly Dictionary _enumToLabel; + readonly Dictionary _labelToEnum; + readonly Encoding _encoding; + + // Unmapped enums + public EnumConverter(Dictionary enumToLabel, Dictionary labelToEnum, Encoding encoding) + { + _enumToLabel = new(enumToLabel.Count); + foreach (var kv in enumToLabel) + _enumToLabel.Add((TEnum)kv.Key, kv.Value); + + _labelToEnum = new(labelToEnum.Count); + foreach (var kv in labelToEnum) + _labelToEnum.Add(kv.Key, (TEnum)kv.Value); + + _encoding = encoding; + } + + public EnumConverter(Dictionary enumToLabel, Dictionary labelToEnum, Encoding encoding) + { + _enumToLabel = enumToLabel; + _labelToEnum = labelToEnum; + _encoding = encoding; + } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Value; + return format is DataFormat.Binary or DataFormat.Text; + } + + public override Size GetSize(SizeContext 
context, TEnum value, ref object? writeState) + { + if (!_enumToLabel.TryGetValue(value, out var str)) + throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); + + return _encoding.GetByteCount(str); + } + + protected override TEnum ReadCore(PgReader reader) + { + var str = _encoding.GetString(reader.ReadBytes(reader.CurrentRemaining)); + var success = _labelToEnum.TryGetValue(str, out var value); + + if (!success) + throw new InvalidCastException($"Received enum value '{str}' from database which wasn't found on enum {typeof(TEnum)}"); + + return value; + } + + protected override void WriteCore(PgWriter writer, TEnum value) + { + if (!_enumToLabel.TryGetValue(value, out var str)) + throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); + + writer.WriteBytes(new ReadOnlySpan(_encoding.GetBytes(str))); + } +} diff --git a/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs b/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs new file mode 100644 index 0000000000..9e88fbe8f1 --- /dev/null +++ b/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs @@ -0,0 +1,222 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; +using static NpgsqlTypes.NpgsqlTsQuery.NodeKind; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TsQueryConverter(Encoding encoding) : PgStreamingConverter + where T : NpgsqlTsQuery +{ + public override T Read(PgReader reader) + => (T)Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => (T)await Read(async: true, reader, cancellationToken).ConfigureAwait(false); + + async ValueTask Read(bool async, PgReader reader, CancellationToken 
cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var numTokens = reader.ReadInt32(); + if (numTokens == 0) + return new NpgsqlTsQueryEmpty(); + + NpgsqlTsQuery? value = null; + var nodes = new Stack<(NpgsqlTsQuery Node, int Location)>(); + + for (var i = 0; i < numTokens; i++) + { + if (reader.ShouldBuffer(sizeof(byte))) + await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false); + + switch (reader.ReadByte()) + { + case 1: // lexeme + if (reader.ShouldBuffer(sizeof(byte) + sizeof(byte))) + await reader.Buffer(async, sizeof(byte) + sizeof(byte), cancellationToken).ConfigureAwait(false); + var weight = (NpgsqlTsQueryLexeme.Weight)reader.ReadByte(); + var prefix = reader.ReadByte() != 0; + + var str = async + ? await reader.ReadNullTerminatedStringAsync(encoding, cancellationToken).ConfigureAwait(false) + : reader.ReadNullTerminatedString(encoding); + InsertInTree(new NpgsqlTsQueryLexeme(str, weight, prefix), nodes, ref value); + continue; + + case 2: // operation + if (reader.ShouldBuffer(sizeof(byte))) + await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false); + var kind = (NpgsqlTsQuery.NodeKind)reader.ReadByte(); + + NpgsqlTsQuery node; + switch (kind) + { + case Not: + node = new NpgsqlTsQueryNot(null!); + InsertInTree(node, nodes, ref value); + nodes.Push((node, 0)); + continue; + + case And: + node = new NpgsqlTsQueryAnd(null!, null!); + break; + case Or: + node = new NpgsqlTsQueryOr(null!, null!); + break; + case Phrase: + if (reader.ShouldBuffer(sizeof(short))) + await reader.Buffer(async, sizeof(short), cancellationToken).ConfigureAwait(false); + node = new NpgsqlTsQueryFollowedBy(null!, reader.ReadInt16(), null!); + break; + default: + throw new UnreachableException( + $"Internal Npgsql bug: unexpected value {kind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. 
Please file a bug."); + } + + InsertInTree(node, nodes, ref value); + + nodes.Push((node, 1)); + nodes.Push((node, 2)); + continue; + + case var tokenType: + throw new UnreachableException( + $"Internal Npgsql bug: unexpected token type {tokenType} when reading tsquery. Please file a bug."); + } + } + + if (nodes.Count != 0) + throw new UnreachableException("Internal Npgsql bug, please report."); + + return value!; + + static void InsertInTree(NpgsqlTsQuery node, Stack<(NpgsqlTsQuery Node, int Location)> nodes, ref NpgsqlTsQuery? value) + { + if (nodes.Count == 0) + value = node; + else + { + var parent = nodes.Pop(); + switch (parent.Location) + { + case 0: + ((NpgsqlTsQueryNot)parent.Node).Child = node; + break; + case 1: + ((NpgsqlTsQueryBinOp)parent.Node).Left = node; + break; + case 2: + ((NpgsqlTsQueryBinOp)parent.Node).Right = node; + break; + default: + throw new UnreachableException("Internal Npgsql bug, please report."); + } + } + } + } + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => value.Kind is Empty + ? 4 + : 4 + GetNodeLength(value); + + int GetNodeLength(NpgsqlTsQuery node) + => node.Kind switch + { + Lexeme when encoding.GetByteCount(((NpgsqlTsQueryLexeme)node).Text) is var strLen + => strLen > 2046 + ? throw new InvalidCastException("Lexeme text too long. Must be at most 2046 encoded bytes.") + : 4 + strLen, + And or Or => 2 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right), + Not => 2 + GetNodeLength(((NpgsqlTsQueryNot)node).Child), + Empty => throw new InvalidOperationException("Empty tsquery nodes must be top-level"), + + // 2 additional bytes for uint16 phrase operator "distance" field. + Phrase => 4 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right), + + _ => throw new UnreachableException( + $"Internal Npgsql bug: unexpected value {node.Kind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. 
Please file a bug.") + }; + + public override void Write(PgWriter writer, T value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlTsQuery value, CancellationToken cancellationToken) + { + var numTokens = GetTokenCount(value); + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteInt32(numTokens); + + if (numTokens is 0) + return; + + await WriteCore(value).ConfigureAwait(false); + + async Task WriteCore(NpgsqlTsQuery node) + { + if (writer.ShouldFlush(sizeof(byte))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteByte(node.Kind is Lexeme ? (byte)1 : (byte)2); + + if (node.Kind is Lexeme) + { + var lexemeNode = (NpgsqlTsQueryLexeme)node; + + if (writer.ShouldFlush(sizeof(byte) + sizeof(byte))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteByte((byte)lexemeNode.Weights); + writer.WriteByte(lexemeNode.IsPrefixSearch ? 
(byte)1 : (byte)0); + + if (async) + await writer.WriteCharsAsync(lexemeNode.Text.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); + else + writer.WriteChars(lexemeNode.Text.AsMemory().Span, encoding); + + if (writer.ShouldFlush(sizeof(byte))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteByte(0); + return; + } + + writer.WriteByte((byte)node.Kind); + + switch (node.Kind) + { + case Not: + await WriteCore(((NpgsqlTsQueryNot)node).Child).ConfigureAwait(false); + return; + case Phrase: + writer.WriteInt16(((NpgsqlTsQueryFollowedBy)node).Distance); + break; + } + + await WriteCore(((NpgsqlTsQueryBinOp)node).Right).ConfigureAwait(false); + await WriteCore(((NpgsqlTsQueryBinOp)node).Left).ConfigureAwait(false); + } + } + + int GetTokenCount(NpgsqlTsQuery node) + => node.Kind switch + { + Lexeme => 1, + And or Or or Phrase => 1 + GetTokenCount(((NpgsqlTsQueryBinOp)node).Left) + GetTokenCount(((NpgsqlTsQueryBinOp)node).Right), + Not => 1 + GetTokenCount(((NpgsqlTsQueryNot)node).Child), + Empty => 0, + + _ => throw new UnreachableException( + $"Internal Npgsql bug: unexpected value {node.Kind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. 
Please file a bug.") + }; +} diff --git a/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs b/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs new file mode 100644 index 0000000000..04b16b80f5 --- /dev/null +++ b/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs @@ -0,0 +1,107 @@ +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TsVectorConverter(Encoding encoding) : PgStreamingConverter +{ + public override NpgsqlTsVector Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var numLexemes = reader.ReadInt32(); + var lexemes = new List(numLexemes); + + for (var i = 0; i < numLexemes; i++) + { + var lexemeString = async + ? 
await reader.ReadNullTerminatedStringAsync(encoding, cancellationToken).ConfigureAwait(false) + : reader.ReadNullTerminatedString(encoding); + + if (reader.ShouldBuffer(sizeof(short))) + await reader.Buffer(async, sizeof(short), cancellationToken).ConfigureAwait(false); + var numPositions = reader.ReadInt16(); + + if (numPositions == 0) + { + lexemes.Add(new NpgsqlTsVector.Lexeme(lexemeString, wordEntryPositions: null, noCopy: true)); + continue; + } + + // There can only be a maximum of 256 positions, so we just before them all (256 * sizeof(short) = 512) + if (numPositions > 256) + throw new NpgsqlException($"Got {numPositions} lexeme positions when reading tsvector"); + + if (reader.ShouldBuffer(numPositions * sizeof(short))) + await reader.Buffer(async, numPositions * sizeof(short), cancellationToken).ConfigureAwait(false); + + var positions = new List(numPositions); + + for (var j = 0; j < numPositions; j++) + { + var wordEntryPos = reader.ReadInt16(); + positions.Add(new NpgsqlTsVector.Lexeme.WordEntryPos(wordEntryPos)); + } + + lexemes.Add(new NpgsqlTsVector.Lexeme(lexemeString, positions, noCopy: true)); + } + + return new NpgsqlTsVector(lexemes, noCheck: true); + } + + public override Size GetSize(SizeContext context, NpgsqlTsVector value, ref object? 
writeState) + { + var size = 4; + foreach (var l in value) + size += encoding.GetByteCount(l.Text) + 1 + 2 + l.Count * 2; + + return size; + } + + public override void Write(PgWriter writer, NpgsqlTsVector value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, NpgsqlTsVector value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlTsVector value, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(sizeof(int))) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + writer.WriteInt32(value.Count); + + foreach (var lexeme in value) + { + if (async) + await writer.WriteCharsAsync(lexeme.Text.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); + else + writer.WriteChars(lexeme.Text.AsMemory().Span, encoding); + + if (writer.ShouldFlush(sizeof(byte) + sizeof(short))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteByte(0); + writer.WriteInt16((short)lexeme.Count); + + for (var i = 0; i < lexeme.Count; i++) + { + if (writer.ShouldFlush(sizeof(short))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt16(lexeme[i].Value); + } + } + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/BoxConverter.cs b/src/Npgsql/Internal/Converters/Geometric/BoxConverter.cs new file mode 100644 index 0000000000..4a7578afba --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/BoxConverter.cs @@ -0,0 +1,26 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class BoxConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 4); + return 
format is DataFormat.Binary; + } + + protected override NpgsqlBox ReadCore(PgReader reader) + => new( + new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble()), + new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble())); + + protected override void WriteCore(PgWriter writer, NpgsqlBox value) + { + writer.WriteDouble(value.Right); + writer.WriteDouble(value.Top); + writer.WriteDouble(value.Left); + writer.WriteDouble(value.Bottom); + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/CircleConverter.cs b/src/Npgsql/Internal/Converters/Geometric/CircleConverter.cs new file mode 100644 index 0000000000..51eea75814 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/CircleConverter.cs @@ -0,0 +1,23 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class CircleConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 3); + return format is DataFormat.Binary; + } + + protected override NpgsqlCircle ReadCore(PgReader reader) + => new(reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble()); + + protected override void WriteCore(PgWriter writer, NpgsqlCircle value) + { + writer.WriteDouble(value.X); + writer.WriteDouble(value.Y); + writer.WriteDouble(value.Radius); + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs b/src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs new file mode 100644 index 0000000000..05b539cf12 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs @@ -0,0 +1,87 @@ +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class CubeConverter : PgStreamingConverter +{ + const uint PointBit = 0x80000000; + const int DimMask = 0x7fffffff; + + public 
override NpgsqlCube Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var header = reader.ReadInt32(); + var dim = header & DimMask; + var point = (header & PointBit) != 0; + + var lowerLeft = new double[dim]; + for (var i = 0; i < dim; i++) + { + if (reader.ShouldBuffer(sizeof(double))) + await reader.Buffer(async, sizeof(double), cancellationToken).ConfigureAwait(false); + lowerLeft[i] = reader.ReadDouble(); + } + + if (point) + return new NpgsqlCube(lowerLeft); + + var upperRight = new double[dim]; + for (var i = 0; i < dim; i++) + { + if (reader.ShouldBuffer(sizeof(double))) + await reader.Buffer(async, sizeof(double), cancellationToken).ConfigureAwait(false); + upperRight[i] = reader.ReadDouble(); + } + + return new NpgsqlCube(lowerLeft, upperRight); + } + + public override Size GetSize(SizeContext context, NpgsqlCube value, ref object? writeState) + => sizeof(int) + sizeof(double) * (value.IsPoint ? 
value.Dimensions : value.Dimensions * 2); + + public override void Write(PgWriter writer, NpgsqlCube value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, NpgsqlCube value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlCube value, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var header = value.Dimensions; + if (value.IsPoint) + header |= 1 << 31; + + writer.WriteInt32(header); + + for (var i = 0; i < value.Dimensions; i++) + { + if (writer.ShouldFlush(sizeof(double))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteDouble(value.LowerLeft[i]); + } + + if (value.IsPoint) + return; + + for (var i = 0; i < value.Dimensions; i++) + { + if (writer.ShouldFlush(sizeof(double))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteDouble(value.UpperRight[i]); + } + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/LineConverter.cs b/src/Npgsql/Internal/Converters/Geometric/LineConverter.cs new file mode 100644 index 0000000000..17d89909b9 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/LineConverter.cs @@ -0,0 +1,23 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class LineConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 3); + return format is DataFormat.Binary; + } + + protected override NpgsqlLine ReadCore(PgReader reader) + => new(reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble()); + + protected override void 
WriteCore(PgWriter writer, NpgsqlLine value) + { + writer.WriteDouble(value.A); + writer.WriteDouble(value.B); + writer.WriteDouble(value.C); + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/LineSegmentConverter.cs b/src/Npgsql/Internal/Converters/Geometric/LineSegmentConverter.cs new file mode 100644 index 0000000000..117a108379 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/LineSegmentConverter.cs @@ -0,0 +1,24 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class LineSegmentConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 4); + return format is DataFormat.Binary; + } + + protected override NpgsqlLSeg ReadCore(PgReader reader) + => new(reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble()); + + protected override void WriteCore(PgWriter writer, NpgsqlLSeg value) + { + writer.WriteDouble(value.Start.X); + writer.WriteDouble(value.Start.Y); + writer.WriteDouble(value.End.X); + writer.WriteDouble(value.End.Y); + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs b/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs new file mode 100644 index 0000000000..0481037254 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs @@ -0,0 +1,68 @@ +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class PathConverter : PgStreamingConverter +{ + public override NpgsqlPath Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, 
cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(byte) + sizeof(int))) + await reader.Buffer(async, sizeof(byte) + sizeof(int), cancellationToken).ConfigureAwait(false); + + var open = reader.ReadByte() switch + { + 1 => false, + 0 => true, + _ => throw new UnreachableException("Error decoding binary geometric path: bad open byte") + }; + + var numPoints = reader.ReadInt32(); + var result = new NpgsqlPath(numPoints, open); + + for (var i = 0; i < numPoints; i++) + { + if (reader.ShouldBuffer(sizeof(double) * 2)) + await reader.Buffer(async, sizeof(double) * 2, cancellationToken).ConfigureAwait(false); + + result.Add(new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble())); + } + + return result; + } + + public override Size GetSize(SizeContext context, NpgsqlPath value, ref object? writeState) + => 5 + value.Count * sizeof(double) * 2; + + public override void Write(PgWriter writer, NpgsqlPath value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, NpgsqlPath value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlPath value, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(sizeof(byte) + sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteByte((byte)(value.Open ? 
0 : 1)); + writer.WriteInt32(value.Count); + + foreach (var p in value) + { + if (writer.ShouldFlush(sizeof(double) * 2)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteDouble(p.X); + writer.WriteDouble(p.Y); + } + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/PointConverter.cs b/src/Npgsql/Internal/Converters/Geometric/PointConverter.cs new file mode 100644 index 0000000000..03e84c05bd --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/PointConverter.cs @@ -0,0 +1,22 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class PointConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 2); + return format is DataFormat.Binary; + } + + protected override NpgsqlPoint ReadCore(PgReader reader) + => new(reader.ReadDouble(), reader.ReadDouble()); + + protected override void WriteCore(PgWriter writer, NpgsqlPoint value) + { + writer.WriteDouble(value.X); + writer.WriteDouble(value.Y); + } +} diff --git a/src/Npgsql/Internal/Converters/Geometric/PolygonConverter.cs b/src/Npgsql/Internal/Converters/Geometric/PolygonConverter.cs new file mode 100644 index 0000000000..9a889b4323 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/PolygonConverter.cs @@ -0,0 +1,55 @@ +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class PolygonConverter : PgStreamingConverter +{ + public override NpgsqlPolygon Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool 
async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var numPoints = reader.ReadInt32(); + var result = new NpgsqlPolygon(numPoints); + for (var i = 0; i < numPoints; i++) + { + if (reader.ShouldBuffer(sizeof(double) * 2)) + await reader.Buffer(async, sizeof(double) * 2, cancellationToken).ConfigureAwait(false); + result.Add(new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble())); + } + + return result; + } + + public override Size GetSize(SizeContext context, NpgsqlPolygon value, ref object? writeState) + => 4 + value.Count * sizeof(double) * 2; + + public override void Write(PgWriter writer, NpgsqlPolygon value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, NpgsqlPolygon value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlPolygon value, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteInt32(value.Count); + + foreach (var p in value) + { + if (writer.ShouldFlush(sizeof(double) * 2)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteDouble(p.X); + writer.WriteDouble(p.Y); + } + } +} diff --git a/src/Npgsql/Internal/Converters/HstoreConverter.cs b/src/Npgsql/Internal/Converters/HstoreConverter.cs new file mode 100644 index 0000000000..f9514450f7 --- /dev/null +++ b/src/Npgsql/Internal/Converters/HstoreConverter.cs @@ -0,0 +1,152 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +sealed class HstoreConverter(Encoding 
encoding, Func>, T>? convert = null) + : PgStreamingConverter + where T : ICollection> +{ + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).Result; + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + { + // Number of lengths (count, key length, value length). + var totalSize = sizeof(int) + value.Count * (sizeof(int) + sizeof(int)); + if (value.Count is 0) + return totalSize; + + var arrayPool = ArrayPool<(Size Size, object? WriteState)>.Shared; + var data = arrayPool.Rent(value.Count * 2); + + var i = 0; + foreach (var kv in value) + { + if (kv.Key is null) + throw new ArgumentException("Hstore doesn't support null keys", nameof(value)); + + var keySize = encoding.GetByteCount(kv.Key); + var valueSize = kv.Value is null ? -1 : encoding.GetByteCount(kv.Value); + totalSize += keySize + (valueSize is -1 ? 0 : valueSize); + data[i] = (keySize, null); + data[i + 1] = (valueSize, null); + i += 2; + } + writeState = new WriteState + { + ArrayPool = arrayPool, + Data = new(data, 0, value.Count * 2), + AnyWriteState = false + }; + return totalSize; + } + + public override void Write(PgWriter writer, T value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var count = reader.ReadInt32(); + + var result = typeof(T) == typeof(Dictionary) || typeof(T) == typeof(IDictionary) + ? 
(ICollection>)new Dictionary(count) + : new List>(count); + + for (var i = 0; i < count; i++) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var keySize = reader.ReadInt32(); + var key = encoding.GetString(async + ? await reader.ReadBytesAsync(keySize, cancellationToken).ConfigureAwait(false) + : reader.ReadBytes(keySize) + ); + + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var valueSize = reader.ReadInt32(); + string? value = null; + if (valueSize is not -1) + value = encoding.GetString(async + ? await reader.ReadBytesAsync(valueSize, cancellationToken).ConfigureAwait(false) + : reader.ReadBytes(valueSize) + ); + + result.Add(new(key, value)); + } + + if (typeof(T) == typeof(Dictionary) || typeof(T) == typeof(IDictionary)) + return (T)result; + + return convert is null ? throw new NotSupportedException() : convert(result); + } + + async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken) + { + if (writer.Current.WriteState is not WriteState && value.Count is not 0) + throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}."); + + // Number of lengths (count, key length, value length). 
+ if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteInt32(value.Count); + + if (value.Count is 0 || writer.Current.WriteState is not WriteState writeState) + return; + + var data = writeState.Data; + var i = data.Offset; + foreach (var kv in value) + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var (size, _) = data.Array![i]; + if (size.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var length = size.Value; + writer.WriteInt32(length); + if (async) + await writer.WriteCharsAsync(kv.Key.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); + else + writer.WriteChars(kv.Key.AsSpan(), encoding); + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var (valueSize, _) = data.Array![i + 1]; + if (valueSize.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var valueLength = valueSize.Value; + writer.WriteInt32(valueLength); + if (valueLength is not -1) + { + if (async) + await writer.WriteCharsAsync(kv.Value.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); + else + writer.WriteChars(kv.Value.AsSpan(), encoding); + } + i += 2; + } + } + + sealed class WriteState : MultiWriteState + { + } +} diff --git a/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs b/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs new file mode 100644 index 0000000000..881d454d3a --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs @@ -0,0 +1,17 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class InternalCharConverter : PgBufferedConverter where T : INumberBase +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = 
BufferRequirements.CreateFixedSize(sizeof(byte)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadByte()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteByte(byte.CreateChecked(value)); +} diff --git a/src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs b/src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs new file mode 100644 index 0000000000..96730c857a --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs @@ -0,0 +1,15 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class PgLsnConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(ulong)); + return format is DataFormat.Binary; + } + protected override NpgsqlLogSequenceNumber ReadCore(PgReader reader) => new(reader.ReadUInt64()); + protected override void WriteCore(PgWriter writer, NpgsqlLogSequenceNumber value) => writer.WriteUInt64((ulong)value); +} diff --git a/src/Npgsql/Internal/Converters/Internal/TidConverter.cs b/src/Npgsql/Internal/Converters/Internal/TidConverter.cs new file mode 100644 index 0000000000..747d98fe17 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/TidConverter.cs @@ -0,0 +1,19 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TidConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(uint) + sizeof(ushort)); + return format is DataFormat.Binary; + } + protected override NpgsqlTid ReadCore(PgReader reader) => new(reader.ReadUInt32(), reader.ReadUInt16()); + protected override void WriteCore(PgWriter writer, NpgsqlTid 
value) + { + writer.WriteUInt32(value.BlockNumber); + writer.WriteUInt16(value.OffsetNumber); + } +} diff --git a/src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs b/src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs new file mode 100644 index 0000000000..92061b1fd2 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs @@ -0,0 +1,13 @@ +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class UInt32Converter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(uint)); + return format is DataFormat.Binary; + } + protected override uint ReadCore(PgReader reader) => reader.ReadUInt32(); + protected override void WriteCore(PgWriter writer, uint value) => writer.WriteUInt32(value); +} diff --git a/src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs b/src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs new file mode 100644 index 0000000000..fcf5e3695a --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs @@ -0,0 +1,13 @@ +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class UInt64Converter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(ulong)); + return format is DataFormat.Binary; + } + protected override ulong ReadCore(PgReader reader) => reader.ReadUInt64(); + protected override void WriteCore(PgWriter writer, ulong value) => writer.WriteUInt64(value); +} diff --git a/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs b/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs new file mode 100644 index 0000000000..a91c39ae9b --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs @@ -0,0 +1,16 @@ +using 
System; + +namespace Npgsql.Internal.Converters.Internal; + +// Void is not a value so we read it as a null reference, not a DBNull. +sealed class VoidConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(0); + return true; + } + + protected override object? ReadCore(PgReader reader) => null; + protected override void WriteCore(PgWriter writer, object? value) => throw new NotSupportedException(); +} diff --git a/src/Npgsql/Internal/Converters/JsonConverter.cs b/src/Npgsql/Internal/Converters/JsonConverter.cs new file mode 100644 index 0000000000..074575e4e1 --- /dev/null +++ b/src/Npgsql/Internal/Converters/JsonConverter.cs @@ -0,0 +1,201 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization.Metadata; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +sealed class JsonConverter : PgStreamingConverter where T: TBase? +{ + readonly bool _jsonb; + readonly Encoding _textEncoding; + readonly JsonTypeInfo _jsonTypeInfo; + readonly JsonTypeInfo? _objectTypeInfo; + + public JsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions) + { + if (serializerOptions.TypeInfoResolver is null) + throw new InvalidOperationException("System.Text.Json serialization requires a type info resolver, make sure to set-it up beforehand."); + + _jsonb = jsonb; + _textEncoding = textEncoding; + _jsonTypeInfo = typeof(TBase) != typeof(object) && typeof(T) != typeof(TBase) + ? (JsonTypeInfo)serializerOptions.GetTypeInfo(typeof(TBase)) + : (JsonTypeInfo)serializerOptions.GetTypeInfo(typeof(T)); + // Unspecified polymorphism, let STJ handle it. + _objectTypeInfo = typeof(TBase) == typeof(object) + ? 
(JsonTypeInfo)serializerOptions.GetTypeInfo(typeof(object)) + : null; + } + + public override T? Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (_jsonb && reader.ShouldBuffer(sizeof(byte))) + await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false); + + // We always fall back to buffers on older targets due to the absence of transcoding stream. + if (JsonConverter.TryReadStream(_jsonb, _textEncoding, reader, out var byteCount, out var stream)) + { + using var _ = stream; + return _jsonTypeInfo switch + { + JsonTypeInfo => (T)(object)(async + ? await JsonDocument.ParseAsync(stream, cancellationToken: cancellationToken).ConfigureAwait(false) + : JsonDocument.Parse(stream)), + + JsonTypeInfo typeInfoOfT => async + ? await JsonSerializer.DeserializeAsync(stream, typeInfoOfT, cancellationToken).ConfigureAwait(false) + : JsonSerializer.Deserialize(stream, typeInfoOfT), + + _ => (T?)(async + ? 
await JsonSerializer.DeserializeAsync(stream, (JsonTypeInfo)_jsonTypeInfo, cancellationToken) + .ConfigureAwait(false) + : JsonSerializer.Deserialize(stream, (JsonTypeInfo)_jsonTypeInfo)) + }; + } + + var (rentedChars, rentedBytes) = await JsonConverter.ReadRentedBuffer(async, _textEncoding, byteCount, reader, cancellationToken).ConfigureAwait(false); + var result = _jsonTypeInfo switch + { + JsonTypeInfo => (T)(object)JsonDocument.Parse(rentedChars.AsMemory()), + JsonTypeInfo typeInfoOfT => JsonSerializer.Deserialize(rentedChars.AsSpan(), typeInfoOfT), + _ => (T?)JsonSerializer.Deserialize(rentedChars.AsSpan(), (JsonTypeInfo)_jsonTypeInfo) + }; + + ArrayPool.Shared.Return(rentedChars.Array!); + if (rentedBytes is not null) + ArrayPool.Shared.Return(rentedBytes); + + return result; + } + + public override Size GetSize(SizeContext context, T? value, ref object? writeState) + { + var capacity = 0; + if (typeof(T) == typeof(JsonDocument)) + capacity = ((JsonDocument?)(object?)value)?.RootElement.GetRawText().Length ?? 0; + var stream = new MemoryStream(capacity); + + // Mirroring ASP.NET Core serialization strategy https://github.com/dotnet/aspnetcore/issues/47548 + if (_objectTypeInfo is null) + JsonSerializer.Serialize(stream, value, (JsonTypeInfo)_jsonTypeInfo); + else + JsonSerializer.Serialize(stream, value, _objectTypeInfo); + + return JsonConverter.GetSizeCore(_jsonb, stream, _textEncoding, ref writeState); + } + + public override void Write(PgWriter writer, T? value) + => JsonConverter.Write(_jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default) + => JsonConverter.Write(_jsonb, async: true, writer, cancellationToken); +} + +// Split out to avoid unnecessary code duplication. 
+static class JsonConverter +{ + public const byte JsonbProtocolVersion = 1; + // Largest value that is a power of 2 and a multiple of 4096 while staying under the large object heap threshold (85K). + const int StreamingThreshold = 65536; + + public static bool TryReadStream(bool jsonb, Encoding encoding, PgReader reader, out int byteCount, [NotNullWhen(true)]out Stream? stream) + { + if (jsonb) + { + var version = reader.ReadByte(); + if (version != JsonbProtocolVersion) + throw new InvalidCastException($"Unknown jsonb wire format version {version}"); + } + + var isUtf8 = encoding.CodePage == Encoding.UTF8.CodePage; + byteCount = reader.CurrentRemaining; + if (isUtf8 || byteCount >= StreamingThreshold) + { + stream = !isUtf8 + ? Encoding.CreateTranscodingStream(reader.GetStream(), encoding, Encoding.UTF8) + : reader.GetStream(); + } + else + stream = null; + + return stream is not null; + } + + public static async ValueTask<(ArraySegment RentedChars, byte[]? RentedBytes)> ReadRentedBuffer(bool async, Encoding encoding, int byteCount, PgReader reader, CancellationToken cancellationToken) + { + // Never utf8, but we may still be able to save a copy. + byte[]? rentedBuffer = null; + if (!reader.TryReadBytes(byteCount, out ReadOnlyMemory buffer)) + { + rentedBuffer = ArrayPool.Shared.Rent(byteCount); + if (async) + await reader.ReadBytesAsync(rentedBuffer.AsMemory(0, byteCount), cancellationToken).ConfigureAwait(false); + else + reader.ReadBytes(rentedBuffer.AsSpan(0, byteCount)); + buffer = rentedBuffer.AsMemory(0, byteCount); + } + + var charCount = encoding.GetCharCount(buffer.Span); + var chars = ArrayPool.Shared.Rent(charCount); + encoding.GetChars(buffer.Span, chars); + + return (new(chars, 0, charCount), rentedBuffer); + } + + public static Size GetSizeCore(bool jsonb, MemoryStream stream, Encoding encoding, ref object? writeState) + { + if (encoding.CodePage == Encoding.UTF8.CodePage) + { + writeState = stream; + return (int)stream.Length + (jsonb ? 
sizeof(byte) : 0); + } + + if (!stream.TryGetBuffer(out var buffer)) + throw new InvalidOperationException(); + + var bytes = encoding.GetBytes(Encoding.UTF8.GetChars(buffer.Array!, buffer.Offset, buffer.Count)); + writeState = bytes; + return bytes.Length + (jsonb ? sizeof(byte) : 0); + } + + public static async ValueTask Write(bool jsonb, bool async, PgWriter writer, CancellationToken cancellationToken) + { + if (jsonb) + { + if (writer.ShouldFlush(sizeof(byte))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteByte(JsonbProtocolVersion); + } + + ArraySegment buffer; + switch (writer.Current.WriteState) + { + case MemoryStream stream: + if (!stream.TryGetBuffer(out buffer)) + throw new InvalidOperationException(); + break; + case byte[] bytes: + buffer = new ArraySegment(bytes); + break; + default: + throw new InvalidCastException($"Invalid state {writer.Current.WriteState?.GetType().FullName}."); + } + + if (async) + await writer.WriteBytesAsync(buffer.AsMemory(), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(buffer.AsSpan()); + } +} diff --git a/src/Npgsql/Internal/Converters/MoneyConverter.cs b/src/Npgsql/Internal/Converters/MoneyConverter.cs new file mode 100644 index 0000000000..2b6c078a84 --- /dev/null +++ b/src/Npgsql/Internal/Converters/MoneyConverter.cs @@ -0,0 +1,19 @@ +using System; +using System.Numerics; + +namespace Npgsql.Internal.Converters; + +sealed class MoneyConverter : PgBufferedConverter where T : INumberBase +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) => ConvertTo(new PgMoney(reader.ReadInt64())); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt64(ConvertFrom(value).GetValue()); + + static PgMoney ConvertFrom(T value) => 
new(decimal.CreateChecked(value)); + static T ConvertTo(PgMoney money) => T.CreateChecked(money.ToDecimal()); +} diff --git a/src/Npgsql/Internal/Converters/MultirangeConverter.cs b/src/Npgsql/Internal/Converters/MultirangeConverter.cs new file mode 100644 index 0000000000..2757c2933f --- /dev/null +++ b/src/Npgsql/Internal/Converters/MultirangeConverter.cs @@ -0,0 +1,139 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +sealed class MultirangeConverter : PgStreamingConverter + where T : IList + where TRange : notnull +{ + readonly PgConverter _rangeConverter; + readonly BufferRequirements _rangeRequirements; + + public MultirangeConverter(PgConverter rangeConverter) + { + if (!rangeConverter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + throw new NotSupportedException("Range subtype converter has to support the binary format to be compatible."); + _rangeRequirements = bufferRequirements; + _rangeConverter = rangeConverter; + } + + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var numRanges = reader.ReadInt32(); + var multirange = (T)(object)(typeof(T).IsArray ? 
new TRange[numRanges] : new List(numRanges)); + + for (var i = 0; i < numRanges; i++) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var length = reader.ReadInt32(); + Debug.Assert(length != -1); + + var scope = await reader.BeginNestedRead(async, length, _rangeRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + var range = async + ? await _rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : _rangeConverter.Read(reader); + + if (typeof(T).IsArray) + multirange[i] = range; + else + multirange.Add(range); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + + return multirange; + } + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + { + var arrayPool = ArrayPool<(Size Size, object? WriteState)>.Shared; + var data = arrayPool.Rent(value.Count); + + var totalSize = Size.Create(sizeof(int) + sizeof(int) * value.Count); + var anyWriteState = false; + for (var i = 0; i < value.Count; i++) + { + object? innerState = null; + var rangeSize = _rangeConverter.IsDbNullOrGetSize(context.Format, _rangeRequirements.Write, value[i], ref innerState); + anyWriteState = anyWriteState || innerState is not null; + // Ranges should never be NULL. 
+ Debug.Assert(rangeSize.HasValue); + data[i] = new(rangeSize.Value, innerState); + totalSize = totalSize.Combine(rangeSize.Value); + } + + writeState = new WriteState + { + ArrayPool = arrayPool, + Data = new(data, 0, value.Count), + AnyWriteState = anyWriteState + }; + return totalSize; + } + + public override void Write(PgWriter writer, T value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken) + { + if (writer.Current.WriteState is not WriteState writeState) + throw new InvalidCastException($"Invalid state {writer.Current.WriteState?.GetType().FullName}."); + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteInt32(value.Count); + + var data = writeState.Data.Array!; + for (var i = 0; i < value.Count; i++) + { + if (writer.ShouldFlush(sizeof(int))) // Length + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var (size, state) = data[i]; + if (size.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var length = size.Value; + writer.WriteInt32(length); + if (length != -1) + { + using var _ = await writer.BeginNestedWrite(async, _rangeRequirements.Write, length, state, cancellationToken).ConfigureAwait(false); + if (async) + await _rangeConverter.WriteAsync(writer, value[i], cancellationToken).ConfigureAwait(false); + else + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + _rangeConverter.Write(writer, value[i]); + } + } + } + + sealed class WriteState : MultiWriteState + { + } +} diff --git a/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs new 
file mode 100644 index 0000000000..707bcd016b --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs @@ -0,0 +1,23 @@ +using System.Net; +using System.Net.Sockets; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class IPAddressConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => NpgsqlInetConverter.CanConvertImpl(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, IPAddress value, ref object? writeState) + => NpgsqlInetConverter.GetSizeImpl(context, value, ref writeState); + + protected override IPAddress ReadCore(PgReader reader) + => NpgsqlInetConverter.ReadImpl(reader, shouldBeCidr: false).Address; + + protected override void WriteCore(PgWriter writer, IPAddress value) + => NpgsqlInetConverter.WriteImpl( + writer, + (value, (byte)(value.AddressFamily == AddressFamily.InterNetwork ? 32 : 128)), + isCidr: false); +} diff --git a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs new file mode 100644 index 0000000000..a76e645d8c --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs @@ -0,0 +1,31 @@ +using System; +using System.Net; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class IPNetworkConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => NpgsqlInetConverter.CanConvertImpl(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, IPNetwork value, ref object? 
writeState) + => NpgsqlInetConverter.GetSizeImpl(context, value.BaseAddress, ref writeState); + + protected override IPNetwork ReadCore(PgReader reader) + { + var (ip, netmask) = NpgsqlInetConverter.ReadImpl(reader, shouldBeCidr: true); + return new(ip, netmask); + } + + protected override void WriteCore(PgWriter writer, IPNetwork value) + => NpgsqlInetConverter.WriteImpl( + writer, + ( + value.BaseAddress, + value.PrefixLength <= byte.MaxValue + ? (byte)value.PrefixLength + : throw new ArgumentOutOfRangeException(nameof(value), "IPNetwork.PrefixLength is too large to fit in a byte") + ), + isCidr: true); +} diff --git a/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs b/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs new file mode 100644 index 0000000000..d9c2aa46e8 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs @@ -0,0 +1,36 @@ +using System; +using System.Diagnostics; +using System.Net.NetworkInformation; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class MacaddrConverter(bool macaddr8) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = macaddr8 ? BufferRequirements.Create(Size.CreateUpperBound(8)) : BufferRequirements.CreateFixedSize(6); + return format is DataFormat.Binary; + } + + public override Size GetSize(SizeContext context, PhysicalAddress value, ref object? 
writeState) + => value.GetAddressBytes().Length; + + protected override PhysicalAddress ReadCore(PgReader reader) + { + var len = reader.CurrentRemaining; + Debug.Assert(len is 6 or 8); + + var bytes = new byte[len]; + reader.Read(bytes); + return new PhysicalAddress(bytes); + } + + protected override void WriteCore(PgWriter writer, PhysicalAddress value) + { + var bytes = value.GetAddressBytes(); + if (!macaddr8 && bytes.Length is not 6) + throw new ArgumentException("A macaddr value must be 6 bytes long."); + writer.WriteBytes(bytes); + } +} diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs new file mode 100644 index 0000000000..451fab4959 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs @@ -0,0 +1,24 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +#pragma warning disable CS0618 // NpgsqlCidr is obsolete +sealed class NpgsqlCidrConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => NpgsqlInetConverter.CanConvertImpl(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, NpgsqlCidr value, ref object? 
writeState) + => NpgsqlInetConverter.GetSizeImpl(context, value.Address, ref writeState); + + protected override NpgsqlCidr ReadCore(PgReader reader) + { + var (ip, netmask) = NpgsqlInetConverter.ReadImpl(reader, shouldBeCidr: true); + return new(ip, netmask); + } + + protected override void WriteCore(PgWriter writer, NpgsqlCidr value) + => NpgsqlInetConverter.WriteImpl(writer, (value.Address, value.Netmask), isCidr: true); +} +#pragma warning restore CS0618 diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs new file mode 100644 index 0000000000..ea0066c9de --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs @@ -0,0 +1,75 @@ +using System; +using System.Diagnostics; +using System.Net; +using System.Net.Sockets; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class NpgsqlInetConverter : PgBufferedConverter +{ + const byte IPv4 = 2; + const byte IPv6 = 3; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertImpl(format, out bufferRequirements); + + internal static bool CanConvertImpl(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(Size.CreateUpperBound(20)); + return format == DataFormat.Binary; + } + + public override Size GetSize(SizeContext context, NpgsqlInet value, ref object? writeState) + => GetSizeImpl(context, value.Address, ref writeState); + + internal static Size GetSizeImpl(SizeContext context, IPAddress ipAddress, ref object? 
writeState) + => ipAddress.AddressFamily switch + { + AddressFamily.InterNetwork => 8, + AddressFamily.InterNetworkV6 => 20, + _ => throw new InvalidCastException( + $"Can't handle IPAddress with AddressFamily {ipAddress.AddressFamily}, only InterNetwork or InterNetworkV6!") + }; + + protected override NpgsqlInet ReadCore(PgReader reader) + { + var (ip, netmask) = ReadImpl(reader, shouldBeCidr: false); + return new(ip, netmask); + } + + internal static (IPAddress Address, byte Netmask) ReadImpl(PgReader reader, bool shouldBeCidr) + { + _ = reader.ReadByte(); // addressFamily + var mask = reader.ReadByte(); // mask + + var isCidr = reader.ReadByte() == 1; + Debug.Assert(isCidr == shouldBeCidr); + + var numBytes = reader.ReadByte(); + Span bytes = stackalloc byte[numBytes]; + reader.Read(bytes); + return (new IPAddress(bytes), mask); + } + + protected override void WriteCore(PgWriter writer, NpgsqlInet value) + => WriteImpl(writer, (value.Address, value.Netmask), isCidr: false); + + internal static void WriteImpl(PgWriter writer, (IPAddress Address, byte Netmask) value, bool isCidr) + { + writer.WriteByte(value.Address.AddressFamily switch + { + AddressFamily.InterNetwork => IPv4, + AddressFamily.InterNetworkV6 => IPv6, + _ => throw new InvalidCastException( + $"Can't handle IPAddress with AddressFamily {value.Address.AddressFamily}, only InterNetwork or InterNetworkV6!") + }); + + writer.WriteByte(value.Netmask); + writer.WriteByte((byte)(isCidr ? 
1 : 0)); // Ignored on server side + var bytes = value.Address.GetAddressBytes(); + writer.WriteByte((byte)bytes.Length); + writer.WriteBytes(bytes); + } +} diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs new file mode 100644 index 0000000000..48e324f59f --- /dev/null +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +// NULL writing is always responsibility of the caller writing the length, so there is not much we do here. +/// Special value converter to be able to use struct converters as System.Nullable converters, it delegates all behavior to the effective converter. +sealed class NullableConverter(PgConverter effectiveConverter) + : PgConverter(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) + where T : struct +{ + protected override bool IsDbNullValue(T? value, object? writeState) + => value is null || effectiveConverter.IsDbNull(value.GetValueOrDefault(), writeState); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => effectiveConverter.CanConvert(format, out bufferRequirements); + + public override T? Read(PgReader reader) + => effectiveConverter.Read(reader); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => this.ReadAsyncAsNullable(effectiveConverter, reader, cancellationToken); + + public override Size GetSize(SizeContext context, [DisallowNull]T? value, ref object? writeState) + => effectiveConverter.GetSize(context, value.GetValueOrDefault(), ref writeState); + + public override void Write(PgWriter writer, T? value) + => effectiveConverter.Write(writer, value.GetValueOrDefault()); + + public override ValueTask WriteAsync(PgWriter writer, T? 
value, CancellationToken cancellationToken = default) + => effectiveConverter.WriteAsync(writer, value.GetValueOrDefault(), cancellationToken); + + internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) + => effectiveConverter.ReadAsObject(async, reader, cancellationToken); + + internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + => effectiveConverter.WriteAsObject(async, writer, value, cancellationToken); +} + +sealed class NullableTypeInfoProvider(PgProviderTypeInfo effectiveTypeInfo) + : PgComposingTypeInfoProvider(effectiveTypeInfo.PgTypeId, effectiveTypeInfo) + where T : struct +{ + protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; + protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; + + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType) + { + requestedType = null; + return new NullableConverter((PgConverter)effectiveConcreteTypeInfo.Converter); + } + + protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState) + => value is not null + ? EffectiveTypeInfo.GetForValue(effectiveContext, value.GetValueOrDefault(), out writeState) + : null; +} diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs b/src/Npgsql/Internal/Converters/ObjectConverter.cs new file mode 100644 index 0000000000..fb755178d0 --- /dev/null +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -0,0 +1,129 @@ +using System; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class ObjectConverter() : PgStreamingConverter(customDbNullPredicate: true) +{ + protected override bool IsDbNullValue(object? value, object? 
writeState) + { + var (concreteTypeInfo, effectiveState) = writeState switch + { + PgConcreteTypeInfo info => (info, (object?)null), + WriteState ws => (ws.ConcreteTypeInfo, ws.EffectiveState), + _ => throw new InvalidOperationException("writeState cannot be null, LateBoundTypeInfoProvider is expected to pre-populate it with concrete type info.") + }; + + return concreteTypeInfo.Converter.IsDbNullAsObject(value, effectiveState); + } + + public override object Read(PgReader reader) => throw new NotSupportedException(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) => throw new NotSupportedException(); + + public override Size GetSize(SizeContext context, object value, ref object? writeState) + { + var (concreteTypeInfo, effectiveState) = writeState switch + { + PgConcreteTypeInfo info => (info, (object?)null), + WriteState state => (state.ConcreteTypeInfo, state.EffectiveState), + _ => throw new InvalidOperationException("Invalid state") + }; + + if (!concreteTypeInfo.Converter.CanConvert(context.Format, out var bufferRequirements)) + { + ThrowHelper.ThrowNotSupportedException($"Resolved converter '{concreteTypeInfo.Converter.GetType()}' has to support the {context.Format} format to be compatible."); + return default; + } + + // Fixed size converters won't have a GetSize implementation. 
+ if (bufferRequirements.Write.Kind is SizeKind.Exact) + return bufferRequirements.Write; + + var result = concreteTypeInfo.Converter.GetSizeAsObject(context, value, ref effectiveState); + if (effectiveState is not null) + { + if (writeState is WriteState s && !ReferenceEquals(s.EffectiveState, effectiveState)) + s.EffectiveState = effectiveState; + else + writeState = new WriteState { ConcreteTypeInfo = concreteTypeInfo, EffectiveState = effectiveState }; + } + + return result; + } + + public override void Write(PgWriter writer, object value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, object value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + var (concreteTypeInfo, effectiveState) = writer.Current.WriteState switch + { + PgConcreteTypeInfo info => (info, (object?)null), + WriteState state => (state.ConcreteTypeInfo, state.EffectiveState), + _ => throw new InvalidOperationException("Invalid state") + }; + + var found = concreteTypeInfo.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements); + Debug.Assert(found); + var writeRequirement = bufferRequirements.Write; + using var _ = await writer.BeginNestedWrite(async, writeRequirement, writer.Current.Size.Value, effectiveState, cancellationToken).ConfigureAwait(false); + await concreteTypeInfo.Converter.WriteAsObject(async, writer, value, cancellationToken).ConfigureAwait(false); + } + + internal sealed class WriteState : IDisposable + { + public required PgConcreteTypeInfo ConcreteTypeInfo { get; init; } + public required object? EffectiveState { get; set; } + + // EffectiveState may hold a pooled WriteState from the underlying concrete converter + // (composite, array, etc.). 
The outer DisposeWriteState on PgTypeInfo only sees this + // wrapper, so the wrapper is responsible for cascading disposal to the inner state. + public void Dispose() + { + if (EffectiveState is IDisposable disposable) + disposable.Dispose(); + } + } +} + +// TODO the goal is to allow this provider to return the underlying converter type info, but we're not there yet. +// At that point we don't need the ObjectConverter any longer. +sealed class LateBoundTypeInfoProvider(PgSerializerOptions options, PgTypeId typeId) : PgConcreteTypeInfoProvider +{ + readonly PgConcreteTypeInfo _defaultConcreteTypeInfo = new(options, new ObjectConverter(), typeId); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + { + // Late binding is only supported when we've decided on a type id, so the provider's nominal typeId is the only + // legitimate answer. Upstream PgProviderTypeInfo.GetDefaultConcreteTypeInfo already throws on a mismatched id. + // Meaning, pgTypeId is either null or equal to typeId, and either way we return the cached info. + Debug.Assert(pgTypeId is null || pgTypeId == typeId); + return _defaultConcreteTypeInfo; + } + + protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) + { + if (value is null or DBNull) + { + writeState = options.UnspecifiedDBNullTypeInfo; + return GetDefaultCore(context.ExpectedPgTypeId); + } + + var valueType = value.GetType(); + var typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, context.ExpectedPgTypeId ?? typeId, options); + var concreteTypeInfo = typeInfo.MakeConcreteForValueAsObject(value, out var effectiveState); + if (!concreteTypeInfo.SupportsWriting) + AdoSerializerHelpers.ThrowWritingNotSupported(valueType, options, concreteTypeInfo.PgTypeId, resolved: true); + writeState = effectiveState is not null + ? 
new ObjectConverter.WriteState { ConcreteTypeInfo = concreteTypeInfo, EffectiveState = effectiveState } + : concreteTypeInfo; + + return GetDefault(context.ExpectedPgTypeId); + } +} diff --git a/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs b/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs new file mode 100644 index 0000000000..b52b15a266 --- /dev/null +++ b/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs @@ -0,0 +1,51 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +// Many ways to achieve exact-type composition on top of a polymorphic element type. +// Including pushing construction through a GVM visitor pattern on the element handler, +// manual reimplementation of the element logic in the array provider, and other ways. +// This one however is by far the most lightweight on both the implementation duplication and code bloat axes. +sealed class PolymorphicArrayTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgTypeId _pgTypeId; + readonly PgProviderTypeInfo _elementTypeInfo; + readonly Func _elementToArrayConverterFactory; + readonly PgTypeId _elementPgTypeId; + readonly ConcurrentDictionary _concreteInfoCache = new(ReferenceEqualityComparer.Instance); + + public PolymorphicArrayTypeInfoProvider(PgTypeId pgTypeId, PgProviderTypeInfo elementTypeInfo, Func elementToArrayConverterFactory) + { + if (elementTypeInfo.PgTypeId is null) + throw new ArgumentException("Type info cannot have an undecided PgTypeId.", nameof(elementTypeInfo)); + + _pgTypeId = pgTypeId; + _elementTypeInfo = elementTypeInfo; + _elementToArrayConverterFactory = elementToArrayConverterFactory; + _elementPgTypeId = elementTypeInfo.PgTypeId!.Value; + } + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? 
pgTypeId) + => GetOrAdd(_elementTypeInfo.GetDefault(_elementPgTypeId)); + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported."); + + protected override PgConcreteTypeInfo? GetForFieldCore(Field field) + { + var elementConcreteTypeInfo = _elementTypeInfo.GetForField(field with { PgTypeId = _elementPgTypeId }); + return elementConcreteTypeInfo is not null ? GetOrAdd(elementConcreteTypeInfo) : null; + } + + PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo elementConcreteTypeInfo) + { + (PolymorphicArrayTypeInfoProvider Instance, PgConcreteTypeInfo ConcreteInfo) state = (this, elementConcreteTypeInfo); + return _concreteInfoCache.GetOrAdd(elementConcreteTypeInfo, + static (_, state) => + new(state.ConcreteInfo.Options, state.Instance._elementToArrayConverterFactory(state.ConcreteInfo), state.Instance._pgTypeId), + state); + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs b/src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs new file mode 100644 index 0000000000..196877ad0e --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs @@ -0,0 +1,13 @@ +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class BoolConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(byte)); + return format is DataFormat.Binary; + } + protected override bool ReadCore(PgReader reader) => reader.ReadByte() is not 0; + protected override void WriteCore(PgWriter writer, bool value) => writer.WriteByte((byte)(value ? 
1 : 0)); +} diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs new file mode 100644 index 0000000000..903dd15dec --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -0,0 +1,105 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Threading; +using System.Threading.Tasks; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +abstract class ByteaConverters(bool supportsTextFormat) : PgStreamingConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return supportsTextFormat + ? format is DataFormat.Binary or DataFormat.Text + : format is DataFormat.Binary; + } + + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).Result; + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => ConvertTo(value).Length; + + public override void Write(PgWriter writer, T value) + => writer.WriteBytes(ConvertTo(value).Span); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteBytesAsync(ConvertTo(value), cancellationToken); + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + var bytes = new byte[reader.CurrentRemaining]; + if (async) + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + else + reader.ReadBytes(bytes); + + return ConvertFrom(new(bytes)); + } + + protected abstract Memory ConvertTo(T value); + protected abstract T ConvertFrom(Memory value); +} + +sealed class ArraySegmentByteaConverter(bool supportsTextFormat) : ByteaConverters>(supportsTextFormat) +{ + protected override Memory ConvertTo(ArraySegment value) => value; + protected override ArraySegment ConvertFrom(Memory value) + => MemoryMarshal.TryGetArray(value, out var segment) + ? segment + : throw new UnreachableException("Expected array-backed memory"); +} + +sealed class ArrayByteaConverter(bool supportsTextFormat) : PgStreamingConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return supportsTextFormat + ? 
format is DataFormat.Binary or DataFormat.Text + : format is DataFormat.Binary; + } + + public override byte[] Read(PgReader reader) + { + var bytes = new byte[reader.CurrentRemaining]; + reader.ReadBytes(bytes); + return bytes; + } + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + var bytes = new byte[reader.CurrentRemaining]; + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + return bytes; + } + + public override Size GetSize(SizeContext context, byte[] value, ref object? writeState) + => value.Length; + + public override void Write(PgWriter writer, byte[] value) + => writer.WriteBytes(value); + + public override ValueTask WriteAsync(PgWriter writer, byte[] value, CancellationToken cancellationToken = default) + => writer.WriteBytesAsync(value, cancellationToken); +} + +sealed class ReadOnlyMemoryByteaConverter(bool supportsTextFormat) : ByteaConverters>(supportsTextFormat) +{ + protected override Memory ConvertTo(ReadOnlyMemory value) => MemoryMarshal.AsMemory(value); + protected override ReadOnlyMemory ConvertFrom(Memory value) => value; +} + +sealed class MemoryByteaConverter(bool supportsTextFormat) : ByteaConverters>(supportsTextFormat) +{ + protected override Memory ConvertTo(Memory value) => value; + protected override Memory ConvertFrom(Memory value) => value; +} diff --git a/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs b/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs new file mode 100644 index 0000000000..8bc9caaf67 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs @@ -0,0 +1,17 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class DoubleConverter : PgBufferedConverter where T : INumberBase +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = 
BufferRequirements.CreateFixedSize(sizeof(double)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadDouble()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteDouble(double.CreateChecked(value)); +} diff --git a/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs new file mode 100644 index 0000000000..18e6b0edc5 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs @@ -0,0 +1,24 @@ +using System; +using System.Buffers.Binary; +using System.Runtime.InteropServices; + +namespace Npgsql.Internal.Converters; + +sealed class GuidUuidConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(16 * sizeof(byte)); + return format is DataFormat.Binary; + } + + protected override Guid ReadCore(PgReader reader) + => new(reader.ReadBytes(16).FirstSpan, bigEndian: true); + + protected override void WriteCore(PgWriter writer, Guid value) + { + Span bytes = stackalloc byte[16]; + value.TryWriteBytes(bytes, bigEndian: true, out _); + writer.WriteBytes(bytes); + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs new file mode 100644 index 0000000000..741af9a75e --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs @@ -0,0 +1,17 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class Int2Converter : PgBufferedConverter where T : INumberBase +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(short)); + return format is DataFormat.Binary; + } + + 
protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt16()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt16(short.CreateChecked(value)); +} diff --git a/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs new file mode 100644 index 0000000000..4327d2f2e7 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs @@ -0,0 +1,17 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class Int4Converter : PgBufferedConverter where T : INumberBase +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt32()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt32(int.CreateChecked(value)); +} diff --git a/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs new file mode 100644 index 0000000000..09a54cf265 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs @@ -0,0 +1,17 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class Int8Converter : PgBufferedConverter where T : INumberBase +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt64()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt64(long.CreateChecked(value)); +} diff 
--git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs new file mode 100644 index 0000000000..7d61d677e9 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -0,0 +1,212 @@ +using System; +using System.Buffers; +using System.Numerics; +using System.Threading; +using System.Threading.Tasks; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class BigIntegerNumericConverter : PgStreamingConverter +{ + const int StackAllocByteThreshold = 64 * sizeof(uint); + + public override BigInteger Read(PgReader reader) + { + if (reader.ShouldBuffer(sizeof(short))) + reader.Buffer(sizeof(short)); + + var digitCount = reader.ReadInt16(); + short[]? digitsFromPool = null; + var digits = (digitCount <= StackAllocByteThreshold / sizeof(short) + ? stackalloc short[StackAllocByteThreshold / sizeof(short)] + : (digitsFromPool = ArrayPool.Shared.Rent(digitCount)).AsSpan()).Slice(0, digitCount); + + var value = ConvertTo(NumericConverter.Read(reader, digits)); + + if (digitsFromPool is not null) + ArrayPool.Shared.Return(digitsFromPool); + + return value; + } + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + // If we don't need a read and can read buffered we delegate to our sync read method which won't do IO in such a case. 
+ if (!reader.ShouldBuffer(reader.CurrentRemaining)) + return new(Read(reader)); + + return AsyncCore(reader, cancellationToken); + + static async ValueTask AsyncCore(PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(short))) + await reader.BufferAsync(sizeof(short), cancellationToken).ConfigureAwait(false); + + var digitCount = reader.ReadInt16(); + var digits = new ArraySegment(ArrayPool.Shared.Rent(digitCount), 0, digitCount); + var value = ConvertTo(await NumericConverter.ReadAsync(reader, digits, cancellationToken).ConfigureAwait(false)); + + ArrayPool.Shared.Return(digits.Array!); + + return value; + } + } + + public override Size GetSize(SizeContext context, BigInteger value, ref object? writeState) => + PgNumeric.GetByteCount(PgNumeric.GetDigitCount(value)); + + public override void Write(PgWriter writer, BigInteger value) + { + // We don't know how many digits we need so we allocate a decent chunk of stack for the builder to use. + // If it's not enough for the builder will do a heap allocation (for decimal it's always enough). + Span destination = stackalloc short[StackAllocByteThreshold / sizeof(short)]; + var numeric = ConvertFrom(value, destination); + NumericConverter.Write(writer, numeric); + } + + public override ValueTask WriteAsync(PgWriter writer, BigInteger value, CancellationToken cancellationToken = default) + { + if (writer.ShouldFlush(writer.Current.Size.Value)) + return AsyncCore(writer, value, cancellationToken); + + // If we don't need a flush and can write buffered we delegate to our sync write method which won't flush in such a case. 
+ Write(writer, value); + return new(); + + static async ValueTask AsyncCore(PgWriter writer, BigInteger value, CancellationToken cancellationToken) + { + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + var numeric = ConvertFrom(value, Array.Empty()).Build(); + await NumericConverter.WriteAsync(writer, numeric, cancellationToken).ConfigureAwait(false); + } + } + + static PgNumeric.Builder ConvertFrom(BigInteger value, Span destination) => new(value, destination); + static BigInteger ConvertTo(in PgNumeric.Builder numeric) => numeric.ToBigInteger(); + static BigInteger ConvertTo(in PgNumeric numeric) => numeric.ToBigInteger(); +} + +sealed class DecimalNumericConverter : PgBufferedConverter where T : INumberBase +{ + const int StackAllocByteThreshold = 64 * sizeof(uint); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + // This upper bound would already cause an overflow exception in the builder, no need to do + 1. + bufferRequirements = BufferRequirements.Create(Size.CreateUpperBound(NumericConverter.DecimalBasedMaxByteCount)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) + { + var digitCount = reader.ReadInt16(); + var digits = stackalloc short[StackAllocByteThreshold / sizeof(short)].Slice(0, digitCount);; + var value = ConvertTo(NumericConverter.Read(reader, digits)); + return value; + } + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) => + PgNumeric.GetByteCount(default(T) switch + { + _ when typeof(decimal) == typeof(T) => PgNumeric.GetDigitCount((decimal)(object)value), + _ when typeof(short) == typeof(T) => PgNumeric.GetDigitCount((decimal)(short)(object)value), + _ when typeof(int) == typeof(T) => PgNumeric.GetDigitCount((decimal)(int)(object)value), + _ when typeof(long) == typeof(T) => PgNumeric.GetDigitCount((decimal)(long)(object)value), + _ when typeof(byte) == typeof(T) => PgNumeric.GetDigitCount((decimal)(byte)(object)value), + _ when typeof(sbyte) == typeof(T) => PgNumeric.GetDigitCount((decimal)(sbyte)(object)value), + _ when typeof(float) == typeof(T) => PgNumeric.GetDigitCount((decimal)(float)(object)value), + _ when typeof(double) == typeof(T) => PgNumeric.GetDigitCount((decimal)(double)(object)value), + _ => throw new NotSupportedException() + }); + + protected override void WriteCore(PgWriter writer, T value) + { + // We don't know how many digits we need so we allocate enough for the builder to use. 
+ Span destination = stackalloc short[PgNumeric.Builder.MaxDecimalNumericDigits]; + var numeric = ConvertFrom(value, destination); + NumericConverter.Write(writer, numeric); + } + + static PgNumeric.Builder ConvertFrom(T value, Span destination) + => new(decimal.CreateChecked(value), destination); + + static T ConvertTo(in PgNumeric.Builder numeric) + => T.CreateChecked(numeric.ToDecimal()); +} + +static class NumericConverter +{ + public static readonly int DecimalBasedMaxByteCount = PgNumeric.GetByteCount(PgNumeric.Builder.MaxDecimalNumericDigits); + + public static PgNumeric.Builder Read(PgReader reader, Span digits) + { + var remainingStructureSize = PgNumeric.GetByteCount(0) - sizeof(short); + if (reader.ShouldBuffer(remainingStructureSize)) + reader.Buffer(remainingStructureSize); + var weight = reader.ReadInt16(); + var sign = reader.ReadInt16(); + var scale = reader.ReadInt16(); + foreach (ref var digit in digits) + { + if (reader.ShouldBuffer(sizeof(short))) + reader.Buffer(sizeof(short)); + digit = reader.ReadInt16(); + } + + return new PgNumeric.Builder(digits, weight, sign, scale); + } + + public static async ValueTask ReadAsync(PgReader reader, ArraySegment digits, CancellationToken cancellationToken) + { + var remainingStructureSize = PgNumeric.GetByteCount(0) - sizeof(short); + if (reader.ShouldBuffer(remainingStructureSize)) + await reader.BufferAsync(remainingStructureSize, cancellationToken).ConfigureAwait(false); + var weight = reader.ReadInt16(); + var sign = reader.ReadInt16(); + var scale = reader.ReadInt16(); + var array = digits.Array!; + for (var i = digits.Offset; i < digits.Offset + digits.Count; i++) + { + if (reader.ShouldBuffer(sizeof(short))) + await reader.BufferAsync(sizeof(short), cancellationToken).ConfigureAwait(false); + array[i] = reader.ReadInt16(); + } + + return new PgNumeric.Builder(digits, weight, sign, scale).Build(); + } + + public static void Write(PgWriter writer, PgNumeric.Builder numeric) + { + if 
(writer.ShouldFlush(PgNumeric.GetByteCount(0))) + writer.Flush(); + writer.WriteInt16((short)numeric.Digits.Length); + writer.WriteInt16(numeric.Weight); + writer.WriteInt16(numeric.Sign); + writer.WriteInt16(numeric.Scale); + + foreach (var digit in numeric.Digits) + { + if (writer.ShouldFlush(sizeof(short))) + writer.Flush(); + writer.WriteInt16(digit); + } + } + + public static async ValueTask WriteAsync(PgWriter writer, PgNumeric numeric, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(PgNumeric.GetByteCount(0))) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + writer.WriteInt16((short)numeric.Digits.Count); + writer.WriteInt16(numeric.Weight); + writer.WriteInt16(numeric.Sign); + writer.WriteInt16(numeric.Scale); + + foreach (var digit in numeric.Digits) + { + if (writer.ShouldFlush(sizeof(short))) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + writer.WriteInt16(digit); + } + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs new file mode 100644 index 0000000000..bddbbda648 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs @@ -0,0 +1,56 @@ +using System; +using System.Diagnostics; +using System.Runtime.InteropServices; + +namespace Npgsql.Internal.Converters; + +readonly struct PgMoney +{ + const int DecimalBits = 4; + const int MoneyScale = 2; + readonly long _value; + + public PgMoney(long value) => _value = value; + + public PgMoney(decimal value) + { + if (value is < -92233720368547758.08M or > 92233720368547758.07M) + throw new OverflowException($"The supplied value '{value}' is outside the range for a PostgreSQL money value."); + + // No-op if scale was already 2 or less. 
+ value = decimal.Round(value, MoneyScale, MidpointRounding.AwayFromZero); + + Span bits = stackalloc uint[DecimalBits]; + GetDecimalBits(value, bits, out var scale); + + var money = (long)bits[1] << 32 | bits[0]; + if (value < 0) + money = -money; + + // If we were less than scale 2, multiply. + _value = (MoneyScale - scale) switch + { + 1 => money * 10, + 2 => money * 100, + _ => money + }; + } + + public long GetValue() => _value; + + public decimal ToDecimal() + { + var result = new decimal(_value); + var scaleFactor = new decimal(1, 0, 0, false, MoneyScale); + result *= scaleFactor; + return result; + } + + static void GetDecimalBits(decimal value, Span destination, out short scale) + { + Debug.Assert(destination.Length >= DecimalBits); + + decimal.GetBits(value, MemoryMarshal.Cast(destination)); + scale = value.Scale; + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs new file mode 100644 index 0000000000..c90036d381 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -0,0 +1,397 @@ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.Diagnostics; +using System.Numerics; +using System.Runtime.InteropServices; +using static Npgsql.Internal.Converters.PgNumeric.Builder; + +namespace Npgsql.Internal.Converters; + +readonly struct PgNumeric(ArraySegment digits, short weight, short sign, short scale) +{ + // numeric digit count + weight + sign + scale + const int StructureByteCount = 4 * sizeof(short); + const int DecimalBits = 4; + const int StackAllocByteThreshold = 64 * sizeof(uint); + + readonly ushort _sign = (ushort)sign; + + /// Big endian array of numeric digits + public ArraySegment Digits { get; } = digits; + + public short Weight { get; } = weight; + public short Sign => (short)_sign; + public short Scale { get; } = scale; + + public int GetByteCount() => GetByteCount(Digits.Count); + public static int GetByteCount(int 
digitCount) => StructureByteCount + digitCount * sizeof(short); + + static void GetDecimalBits(decimal value, Span destination, out short scale) + { + Debug.Assert(destination.Length >= DecimalBits); + + decimal.GetBits(value, MemoryMarshal.Cast(destination)); + scale = value.Scale; + } + + public static int GetDigitCount(decimal value) + { + Span bits = stackalloc uint[DecimalBits]; + GetDecimalBits(value, bits, out var scale); + bits = bits.Slice(0, DecimalBits - 1); + return GetDigitCountCore(bits, scale); + } + + public static int GetDigitCount(BigInteger value) + { + var absValue = BigInteger.Abs(value); // isUnsigned: true fails for negative values. + var uintRoundedByteCount = (absValue.GetByteCount(isUnsigned: true) + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); + + byte[]? uintRoundedBitsFromPool = null; + var uintRoundedBits = (uintRoundedByteCount <= StackAllocByteThreshold + ? stackalloc byte[StackAllocByteThreshold] + : uintRoundedBitsFromPool = ArrayPool.Shared.Rent(uintRoundedByteCount) + ).Slice(0, uintRoundedByteCount); + // Fill the last uint worth of bytes as it may only be partially written to. 
+ uintRoundedBits.Slice(uintRoundedBits.Length - sizeof(uint)).Fill(0); + + var success = absValue.TryWriteBytes(uintRoundedBits, out _, isUnsigned: true); + Debug.Assert(success); + + var uintBits = MemoryMarshal.Cast(uintRoundedBits); + if (!BitConverter.IsLittleEndian) + for (var i = 0; i < uintBits.Length; i++) + uintBits[i] = BinaryPrimitives.ReverseEndianness(uintBits[i]); + + var size = GetDigitCountCore(uintBits, scale: 0); + + if (uintRoundedBitsFromPool is not null) + ArrayPool.Shared.Return(uintRoundedBitsFromPool); + + return size; + } + + public decimal ToDecimal() => Builder.ToDecimal(Scale, Weight, _sign, Digits); + public BigInteger ToBigInteger() => Builder.ToBigInteger(Weight, _sign, Digits); + + public readonly ref struct Builder + { + const ushort SignPositive = 0x0000; + const ushort SignNegative = 0x4000; + const ushort SignNan = 0xC000; + const ushort SignPinf = 0xD000; + const ushort SignNinf = 0xF000; + + const uint NumericBase = 10000; + const int NumericBaseLog10 = 4; // log10(10000) + + internal const int MaxDecimalNumericDigits = 8; + + // Fast access for 10^n where n is 0-9 + static ReadOnlySpan UIntPowers10 => + [ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000 + ]; + + const int MaxUInt32Scale = 9; + const int MaxUInt16Scale = 4; + + public short Weight { get; } + + readonly ushort _sign; + public short Sign => (short)_sign; + + public short Scale { get; } + public Span Digits { get; } + readonly short[]? 
_digitsArray; + + public Builder(Span digits, short weight, short sign, short scale) + { + Digits = digits; + Weight = weight; + _sign = (ushort)sign; + Scale = scale; + } + + public Builder(short[] digits, short weight, short sign, short scale) + { + Digits = _digitsArray = digits; + Weight = weight; + _sign = (ushort)sign; + Scale = scale; + } + + [Conditional("DEBUG")] + static void AssertInvariants() + { + Debug.Assert(UIntPowers10.Length >= NumericBaseLog10); + Debug.Assert(NumericBase < short.MaxValue); + } + + static void Create(ref short[]? digitsArray, ref Span destination, scoped Span bits, short scale, out short weight, out int digitCount) + { + AssertInvariants(); + digitCount = 0; + var digitWeight = -scale / NumericBaseLog10 - 1; + + var bitsUpperBound = (bits.Length * (MaxUInt32Scale + 1) + MaxUInt16Scale - 1) / MaxUInt16Scale + 1; + if (bitsUpperBound > destination.Length) + destination = digitsArray = new short[bitsUpperBound]; + + // When the given scale does not sit on a numeric digit boundary we divide once by the remainder power of 10 instead of the base. + // As a result the quotient is aligned to a digit boundary, we must then scale up the remainder by the missed power of 10 to compensate. + var scaleRemainder = scale % NumericBaseLog10; + if (scaleRemainder > 0 && DivideInPlace(bits, UIntPowers10[scaleRemainder], out var remainder) && remainder != 0) + { + remainder *= UIntPowers10[NumericBaseLog10 - scaleRemainder]; + digitWeight--; + destination[destination.Length - 1 - digitCount++] = (short)remainder; + } + while (DivideInPlace(bits, NumericBase, out remainder)) + { + // Initial zero remainders are skipped as these present trailing zero digits, which should not be stored. + if (digitCount == 0 && remainder == 0) + digitWeight++; + else + // We store the results starting from the end so the final digits end up in big endian. 
+ destination[destination.Length - 1 - digitCount++] = (short)remainder; + } + + weight = (short)(digitWeight + digitCount); + + } + + public Builder(decimal value, Span destination) + { + Span bits = stackalloc uint[DecimalBits]; + GetDecimalBits(value, bits, out var scale); + bits = bits.Slice(0, DecimalBits - 1); + + Create(ref _digitsArray, ref destination, bits, scale, out var weight, out var digitCount); + Digits = destination.Slice(destination.Length - digitCount); + Weight = weight; + _sign = value < 0 ? SignNegative : SignPositive; + Scale = scale; + } + + /// + /// + /// + /// + /// If the destination ends up being too small the builder allocates instead + public Builder(BigInteger value, Span destination) + { + var absValue = BigInteger.Abs(value); // isUnsigned: true fails for negative values. + var uintRoundedByteCount = (absValue.GetByteCount(isUnsigned: true) + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); + + byte[]? uintRoundedBitsFromPool = null; + var uintRoundedBits = (uintRoundedByteCount <= StackAllocByteThreshold + ? stackalloc byte[StackAllocByteThreshold] + : uintRoundedBitsFromPool = ArrayPool.Shared.Rent(uintRoundedByteCount) + ).Slice(0, uintRoundedByteCount); + // Fill the last uint worth of bytes as it may only be partially written to. + uintRoundedBits.Slice(uintRoundedBits.Length - sizeof(uint)).Fill(0); + + var success = absValue.TryWriteBytes(uintRoundedBits, out _, isUnsigned: true); + Debug.Assert(success); + var uintBits = MemoryMarshal.Cast(uintRoundedBits); + + // Our calculations are all done in little endian, meaning the least significant *uint* is first, just like in BigInteger. + // The bytes comprising every individual uint should still be converted to big endian though. + // As a result an array of bytes like [ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8 ] should become [ 0x4, 0x3, 0x2, 0x1, 0x8, 0x7, 0x6, 0x5 ]. 
+ if (!BitConverter.IsLittleEndian) + for (var i = 0; i < uintBits.Length; i++) + uintBits[i] = BinaryPrimitives.ReverseEndianness(uintBits[i]); + + Create(ref _digitsArray, ref destination, uintBits, scale: 0, out var weight, out var digitCount); + Digits = destination.Slice(destination.Length - digitCount); + Weight = weight; + _sign = value < 0 ? SignNegative : SignPositive; + Scale = 0; + + if (uintRoundedBitsFromPool is not null) + ArrayPool.Shared.Return(uintRoundedBitsFromPool); + } + + public PgNumeric Build() + { + var digitsArray = _digitsArray is not null + ? new ArraySegment(_digitsArray, _digitsArray.Length - Digits.Length, Digits.Length) + : new ArraySegment(Digits.ToArray()); + + return new(digitsArray, Weight, Sign, Scale); + } + + public decimal ToDecimal() => ToDecimal(Scale, Weight, _sign, Digits); + public BigInteger ToBigInteger() => ToBigInteger(Weight, _sign, Digits); + + int DigitCount => Digits.Length; + + /// + /// + /// + /// + /// + /// + /// Whether the input consists of any non zero bits + static bool DivideInPlace(Span left, uint right, out uint remainder) + => Divide(left, right, left, out remainder); + + /// Adapted from BigInteger, to allow us to operate directly on stack allocated bits + static bool Divide(ReadOnlySpan left, uint right, Span quotient, out uint remainder) + { + Debug.Assert(quotient.Length == left.Length); + + // Executes the division for one big and one 32-bit integer. + // Thus, we've similar code than below, but there is no loop for + // processing the 32-bit integer, since it's a single element. 
+ + var carry = 0UL; + + var nonZeroInput = false; + for (var i = left.Length - 1; i >= 0; i--) + { + var value = (carry << 32) | left[i]; + nonZeroInput = nonZeroInput || value != 0; + var digit = value / right; + quotient[i] = (uint)digit; + carry = value - digit * right; + } + remainder = (uint)carry; + + return nonZeroInput; + } + + internal static int GetDigitCountCore(Span bits, int scale) + { + AssertInvariants(); + // When a fractional result is expected we must send two numeric digits. + // When the given scale does not sit on a numeric digit boundary- + // we divide once by the remaining power of 10 instead of the full base to align things. + var baseLogRemainder = scale % NumericBaseLog10; + var den = baseLogRemainder > 0 ? UIntPowers10[baseLogRemainder] : NumericBase; + var digits = 0; + while (DivideInPlace(bits, den, out var remainder)) + { + den = NumericBase; + // Initial zero remainders are skipped as these present trailing zero digits, which should not be transmitted. + if (digits != 0 || remainder != 0) + digits++; + } + + return digits; + } + + internal static decimal ToDecimal(short scale, short weight, ushort sign, Span digits) + { + const int MaxUIntScale = 9; + const int MaxDecimalScale = 28; + + var digitCount = digits.Length; + if (digitCount > MaxDecimalNumericDigits) + throw new OverflowException("Numeric value does not fit in a System.Decimal"); + + if (Math.Abs(scale) > MaxDecimalScale) + throw new OverflowException("Numeric value does not fit in a System.Decimal"); + + var scaleFactor = new decimal(1, 0, 0, false, (byte)(scale > 0 ? 
scale : 0)); + if (digitCount == 0) + return sign switch + { + SignPositive or SignNegative => decimal.Zero * scaleFactor, + SignNan => throw new InvalidCastException("Numeric NaN not supported by System.Decimal"), + SignPinf => throw new InvalidCastException("Numeric Infinity not supported by System.Decimal"), + SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by System.Decimal"), + _ => throw new ArgumentOutOfRangeException() + }; + + var numericBase = new decimal(NumericBase); + var result = decimal.Zero; + for (var i = 0; i < digitCount - 1; i++) + { + result *= numericBase; + result += digits[i]; + } + + var digitScale = (weight + 1 - digitCount) * NumericBaseLog10; + var scaleDifference = scale < 0 ? digitScale : digitScale + scale; + + var digit = digits[digitCount - 1]; + if (digitCount == MaxDecimalNumericDigits) + { + // On the max group we adjust the base based on the scale difference, to prevent overflow for valid values. + var pow = UIntPowers10[-scaleDifference]; + result *= numericBase / pow; + result += new decimal(digit / pow); + } + else + { + result *= numericBase; + result += digit; + + if (scaleDifference < 0) + { + // Doesn't look like we can loop even once, but just to be on a safe side + while (scaleDifference < 0) + { + var scaleChunk = Math.Min(MaxUIntScale, -scaleDifference); + result /= UIntPowers10[scaleChunk]; + scaleDifference += scaleChunk; + } + } + else + { + while (scaleDifference > 0) + { + var scaleChunk = Math.Min(MaxUIntScale, scaleDifference); + scaleFactor *= UIntPowers10[scaleChunk]; + scaleDifference -= scaleChunk; + } + } + } + + result *= scaleFactor; + return sign == SignNegative ? 
-result : result; + } + + internal static BigInteger ToBigInteger(short weight, ushort sign, Span digits) + { + var digitCount = digits.Length; + if (digitCount == 0) + return sign switch + { + SignPositive or SignNegative => BigInteger.Zero, + SignNan => throw new InvalidCastException("Numeric NaN not supported by BigInteger"), + SignPinf => throw new InvalidCastException("Numeric Infinity not supported by BigInteger"), + SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by BigInteger"), + _ => throw new ArgumentOutOfRangeException() + }; + + var digitWeight = weight + 1 - digitCount; + if (digitWeight < 0) + throw new InvalidCastException("Numeric value with non-zero fractional digits not supported by BigInteger"); + + var numericBase = new BigInteger(NumericBase); + var result = BigInteger.Zero; + foreach (var digit in digits) + { + result *= numericBase; + result += new BigInteger(digit); + } + + var exponentCorrection = BigInteger.Pow(numericBase, digitWeight); + result *= exponentCorrection; + return sign == SignNegative ? 
/// <summary>
/// Converter for the PostgreSQL <c>real</c> (float4) type, projected onto any numeric
/// CLR type through the generic-math interfaces.
/// </summary>
sealed class RealConverter<T> : PgBufferedConverter<T> where T : INumberBase<T>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        // A float4 is always exactly four bytes on the wire; only binary format is supported.
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(float));
        return format is DataFormat.Binary;
    }

    protected override T ReadCore(PgReader reader)
    {
        // CreateChecked throws OverflowException instead of silently truncating
        // when the wire value does not fit the target numeric type.
        var wireValue = reader.ReadFloat();
        return T.CreateChecked(wireValue);
    }

    protected override void WriteCore(PgWriter writer, T value)
    {
        var wireValue = float.CreateChecked(value);
        writer.WriteFloat(wireValue);
    }
}
format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + public override T Read(PgReader reader) + { + var bytes = reader.ReadBytes(reader.CurrentRemaining); + return TConv.ConvertFrom( + ReferenceEquals(encoding, PgSerializerOptions.DefaultUtf8Encoding) + ? PgSerializerOptions.DefaultUtf8Encoding.GetString(bytes) + : encoding.GetString(bytes)); + } + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + var bytes = await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false); + return TConv.ConvertFrom( + ReferenceEquals(encoding, PgSerializerOptions.DefaultUtf8Encoding) + ? PgSerializerOptions.DefaultUtf8Encoding.GetString(bytes) + : encoding.GetString(bytes)); + } + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => TextConverterHelpers.GetSize(ref context, TConv.ConvertTo(value), encoding); + + public override void Write(PgWriter writer, T value) + => writer.WriteChars(TConv.ConvertTo(value).Span, encoding); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(TConv.ConvertTo(value), encoding, cancellationToken); + } + + interface IStringConversion + { + static abstract ReadOnlyMemory ConvertTo(T value); + static abstract T ConvertFrom(string value); + } + + struct ReadOnlyMemoryConversion : IStringConversion> + { + public static ReadOnlyMemory ConvertTo(ReadOnlyMemory value) => value; + public static ReadOnlyMemory ConvertFrom(string value) => value.AsMemory(); + } + + struct StringConversion : IStringConversion + { + public static ReadOnlyMemory ConvertTo(string value) => value.AsMemory(); + public static string ConvertFrom(string value) => value; + } +} + +abstract class ArrayBasedTextConverter(Encoding encoding) : PgStreamingConverter +{ + public override T Read(PgReader reader) + => Read(async: false, reader, encoding).GetAwaiter().GetResult(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, encoding); + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => TextConverterHelpers.GetSize(ref context, ConvertTo(value), encoding); + + public override void Write(PgWriter writer, T value) + => writer.WriteChars(ConvertTo(value).AsSpan(), encoding); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(ConvertTo(value), encoding, cancellationToken); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + protected abstract ArraySegment ConvertTo(T value); + protected abstract T ConvertFrom(ArraySegment value); + + ValueTask Read(bool async, PgReader reader, Encoding encoding) + { + return async ? ReadAsync(reader, encoding) : new(ConvertFrom(GetSegment(reader.ReadBytes(reader.CurrentRemaining), encoding))); + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask ReadAsync(PgReader reader, Encoding encoding) + => ConvertFrom(GetSegment(await reader.ReadBytesAsync(reader.CurrentRemaining).ConfigureAwait(false), encoding)); + + static ArraySegment GetSegment(ReadOnlySequence bytes, Encoding encoding) + { + var array = TextConverterHelpers.GetChars(encoding, bytes); + return new(array, 0, array.Length); + } + } +} + +sealed class CharArraySegmentTextConverter(Encoding encoding) : ArrayBasedTextConverter>(encoding) +{ + protected override ArraySegment ConvertTo(ArraySegment value) => value; + protected override ArraySegment ConvertFrom(ArraySegment value) => value; +} + +sealed class CharArrayTextConverter(Encoding encoding) : ArrayBasedTextConverter(encoding) +{ + protected override ArraySegment ConvertTo(char[] value) => new(value, 0, value.Length); + protected override char[] ConvertFrom(ArraySegment value) + { + if (value.Array?.Length == value.Count) + return value.Array!; + + var array = new char[value.Count]; + 
/// <summary>
/// Reads and writes a single <see cref="char"/> as PostgreSQL text using the configured encoding.
/// </summary>
sealed class CharTextConverter(Encoding encoding) : PgBufferedConverter<char>
{
    // Upper bound on the byte length of one encoded character; drives buffer requirements.
    readonly Size _oneCharMaxByteCount = Size.CreateUpperBound(encoding.GetMaxByteCount(1));

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.Create(_oneCharMaxByteCount);
        return format is DataFormat.Binary or DataFormat.Text;
    }

    protected override char ReadCore(PgReader reader)
    {
        // Pull in no more bytes than a single character could possibly occupy.
        var available = Math.Min(_oneCharMaxByteCount.Value, reader.CurrentRemaining);
        var sequence = reader.ReadBytes(available);
        Debug.Assert(sequence.IsSingleSegment);
        var encoded = sequence.FirstSpan;

        var charCount = encoding.GetCharCount(encoded);
        if (charCount < 1)
            throw new NpgsqlException("Could not read char - string was empty");

        Span<char> decoded = stackalloc char[charCount];
        encoding.GetChars(encoded, decoded);
        // If the bytes decode to more than one char, only the first is returned.
        return decoded[0];
    }

    public override Size GetSize(SizeContext context, char value, ref object? writeState)
    {
        ReadOnlySpan<char> single = [value];
        return encoding.GetByteCount(single);
    }

    protected override void WriteCore(PgWriter writer, char value)
    {
        ReadOnlySpan<char> single = [value];
        writer.WriteChars(single, encoding);
    }
}
writeState) => throw new NotImplementedException(); + public override void Write(PgWriter writer, TextReader value) => throw new NotImplementedException(); + public override ValueTask WriteAsync(PgWriter writer, TextReader value, CancellationToken cancellationToken = default) => throw new NotImplementedException(); +} + + +readonly struct GetChars(int read) +{ + public int Read { get; } = read; +} + +sealed class GetCharsTextConverter(Encoding encoding) : PgStreamingConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + public override GetChars Read(PgReader reader) + => reader.CharsReadActive + ? ResumableRead(reader) + : throw new NotSupportedException(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => throw new NotSupportedException(); + + public override Size GetSize(SizeContext context, GetChars value, ref object? writeState) => throw new NotSupportedException(); + public override void Write(PgWriter writer, GetChars value) => throw new NotSupportedException(); + public override ValueTask WriteAsync(PgWriter writer, GetChars value, CancellationToken cancellationToken = default) => throw new NotSupportedException(); + + GetChars ResumableRead(PgReader reader) + { + reader.GetCharsReadInfo(encoding, out var charsRead, out var textReader, out var charsOffset, out var buffer); + + // With variable length encodings, moving backwards based on bytes means we have to start over. + if (charsRead > charsOffset) + { + reader.RestartCharsRead(); + charsRead = 0; + } + + // First seek towards the charsOffset. + // If buffer is null read the entire thing and report the length, see sql client remarks. 
+ // https://learn.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqldatareader.getchars + var read = ConsumeChars(textReader, buffer is null ? null : charsOffset - charsRead); + Debug.Assert(buffer is null || read == charsOffset - charsRead); + reader.AdvanceCharsRead(read); + if (buffer is null) + return new(read); + + read = textReader.ReadBlock(buffer.GetValueOrDefault().Array!, buffer.GetValueOrDefault().Offset, buffer.GetValueOrDefault().Count); + reader.AdvanceCharsRead(read); + return new(read); + + static int ConsumeChars(TextReader reader, int? count) + { + if (count is 0) + return 0; + + const int maxStackAlloc = 512; + Span tempCharBuf = stackalloc char[maxStackAlloc]; + var totalRead = 0; + var fin = false; + while (!fin) + { + var toRead = count is null ? maxStackAlloc : Math.Min(maxStackAlloc, count.Value - totalRead); + var read = reader.ReadBlock(tempCharBuf.Slice(0, toRead)); + totalRead += read; + if (count is not null && read is 0) + throw new EndOfStreamException(); + + fin = count is null ? read is 0 : totalRead >= count; + } + return totalRead; + } + } +} + +// Moved out for code size/sharing. +static class TextConverterHelpers +{ + public static Size GetSize(ref SizeContext context, ReadOnlyMemory value, Encoding encoding) + => encoding.GetByteCount(value.Span); + + // Adapted version of GetString(ROSeq) removing the intermediate string allocation to make a contiguous char array. + public static char[] GetChars(Encoding encoding, ReadOnlySequence bytes) + { + if (bytes.IsSingleSegment) + { + // If the incoming sequence is single-segment, one-shot this. + var firstSpan = bytes.First.Span; + var chars = new char[encoding.GetCharCount(firstSpan)]; + encoding.GetChars(bytes.First.Span, chars); + return chars; + } + else + { + // If the incoming sequence is multi-segment, create a stateful Decoder + // and use it as the workhorse. On the final iteration we'll pass flush=true. 
// Adapted version of GetString(ROSeq) removing the intermediate string allocation to make a contiguous char array.
public static char[] GetChars(Encoding encoding, ReadOnlySequence<byte> bytes)
{
    // Single-segment input decodes in one shot; no stateful decoder is needed.
    if (bytes.IsSingleSegment)
    {
        var span = bytes.First.Span;
        var result = new char[encoding.GetCharCount(span)];
        encoding.GetChars(span, result);
        return result;
    }

    // Multi-segment: a stateful Decoder carries partial code points across
    // segment boundaries, flushing only on the final segment.
    var decoder = encoding.GetDecoder();

    // Scratch buffers rented per segment; returned to the pool after concatenation.
    var decodedPieces = new List<(char[] Buffer, int Length)>();
    var decodedTotal = 0;

    var remaining = bytes;
    while (true)
    {
        var segment = remaining.First.Span;
        var nextPosition = remaining.GetPosition(segment.Length);
        var lastSegment = remaining.IsSingleSegment;

        // Could throw ArgumentException if the char count would overflow.
        var upperBound = decoder.GetCharCount(segment, flush: lastSegment);
        var scratch = ArrayPool<char>.Shared.Rent(upperBound);
        var written = decoder.GetChars(segment, scratch, flush: lastSegment);
        decodedPieces.Add((scratch, written));

        decodedTotal += written;
        if (decodedTotal < 0)
            throw new OutOfMemoryException();

        if (lastSegment)
            break;
        remaining = remaining.Slice(nextPosition);
    }

    // Concatenate the decoded pieces into the exact-size result array and
    // hand the scratch buffers back to the shared pool.
    var concatenated = new char[decodedTotal];
    var destination = concatenated.AsSpan();
    foreach (var (buffer, length) in decodedPieces)
    {
        buffer.AsSpan(0, length).CopyTo(destination);
        ArrayPool<char>.Shared.Return(buffer);
        destination = destination.Slice(length);
    }

    return concatenated;
}
/// <summary>
/// Converter for PostgreSQL range types. Delegates bound values to a binary-capable
/// subtype converter; the wire format is a flags byte optionally followed by
/// length-prefixed lower/upper bounds.
/// </summary>
sealed class RangeConverter<TSubtype> : PgStreamingConverter<NpgsqlRange<TSubtype>>
{
    readonly PgConverter<TSubtype> _subtypeConverter;
    readonly BufferRequirements _subtypeRequirements;

    public RangeConverter(PgConverter<TSubtype> subtypeConverter)
    {
        if (!subtypeConverter.CanConvert(DataFormat.Binary, out var bufferRequirements))
            throw new NotSupportedException("Range subtype converter has to support the binary format to be compatible.");
        _subtypeRequirements = bufferRequirements;
        _subtypeConverter = subtypeConverter;
    }

    public override NpgsqlRange<TSubtype> Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<NpgsqlRange<TSubtype>> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    async ValueTask<NpgsqlRange<TSubtype>> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(byte)))
            await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false);

        var flags = (RangeFlags)reader.ReadByte();
        if ((flags & RangeFlags.Empty) != 0)
            return NpgsqlRange<TSubtype>.Empty;

        // Infinite bounds carry no payload; finite bounds are read by the shared helper.
        TSubtype? lowerBound = default;
        TSubtype? upperBound = default;

        if ((flags & RangeFlags.LowerBoundInfinite) == 0)
            lowerBound = await ReadBound(async, reader, cancellationToken).ConfigureAwait(false);

        if ((flags & RangeFlags.UpperBoundInfinite) == 0)
            upperBound = await ReadBound(async, reader, cancellationToken).ConfigureAwait(false);

        return new NpgsqlRange<TSubtype>(lowerBound, upperBound, flags);
    }

    // Reads one (lower or upper) bound: an int32 length prefix followed by the subtype value.
    // Returns the CLR default for a SQL NULL bound (length -1), leaving normalization to the caller.
    async ValueTask<TSubtype?> ReadBound(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
        var length = reader.ReadInt32();

        // Note that we leave the CLR default for nulls
        if (length == -1)
            return default;

        var scope = await reader.BeginNestedRead(async, length, _subtypeRequirements.Read, cancellationToken).ConfigureAwait(false);
        try
        {
            return async
                ? await _subtypeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false)
                // ReSharper disable once MethodHasAsyncOverloadWithCancellation
                : _subtypeConverter.Read(reader);
        }
        finally
        {
            if (async)
                await scope.DisposeAsync().ConfigureAwait(false);
            else
                scope.Dispose();
        }
    }

    public override Size GetSize(SizeContext context, NpgsqlRange<TSubtype> value, ref object? writeState)
    {
        var totalSize = Size.Create(1);
        if (value.IsEmpty)
            return totalSize; // Just flags.

        // Write state is only allocated when at least one bound has a non-null value to write.
        WriteState? state = null;
        if (!value.LowerBoundInfinite)
        {
            totalSize = totalSize.Combine(sizeof(int));
            var subTypeState = (object?)null;
            if (_subtypeConverter.IsDbNullOrGetSize(context.Format, _subtypeRequirements.Write, value.LowerBound, ref subTypeState) is { } size)
            {
                totalSize = totalSize.Combine(size);
                (state ??= new WriteState()).LowerBoundSize = size;
                state.LowerBoundWriteState = subTypeState;
            }
            else if (state is not null)
                state.LowerBoundSize = -1;
        }

        if (!value.UpperBoundInfinite)
        {
            totalSize = totalSize.Combine(sizeof(int));
            var subTypeState = (object?)null;
            if (_subtypeConverter.IsDbNullOrGetSize(context.Format, _subtypeRequirements.Write, value.UpperBound, ref subTypeState) is { } size)
            {
                totalSize = totalSize.Combine(size);
                (state ??= new WriteState()).UpperBoundSize = size;
                state.UpperBoundWriteState = subTypeState;
            }
            else if (state is not null)
                state.UpperBoundSize = -1;
        }

        writeState = state;
        return totalSize;
    }

    public override void Write(PgWriter writer, NpgsqlRange<TSubtype> value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, NpgsqlRange<TSubtype> value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask Write(bool async, PgWriter writer, NpgsqlRange<TSubtype> value, CancellationToken cancellationToken)
    {
        var writeState = writer.Current.WriteState as WriteState;
        var lowerBoundSize = writeState?.LowerBoundSize ?? -1;
        var upperBoundSize = writeState?.UpperBoundSize ?? -1;

        var flags = value.Flags;
        if (!value.IsEmpty)
        {
            // Normalize nulls to infinite, as pg does.
            if (lowerBoundSize == -1 && !value.LowerBoundInfinite)
                flags = (flags & ~RangeFlags.LowerBoundInclusive) | RangeFlags.LowerBoundInfinite;

            if (upperBoundSize == -1 && !value.UpperBoundInfinite)
                flags = (flags & ~RangeFlags.UpperBoundInclusive) | RangeFlags.UpperBoundInfinite;
        }

        if (writer.ShouldFlush(sizeof(byte)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteByte((byte)flags);

        // Bitwise tests instead of Enum.HasFlag; identical result without potential boxing.
        var lowerBoundInfinite = (flags & RangeFlags.LowerBoundInfinite) != 0;
        var upperBoundInfinite = (flags & RangeFlags.UpperBoundInfinite) != 0;
        if (value.IsEmpty || (lowerBoundInfinite && upperBoundInfinite))
            return;

        // Always need write state from this point.
        if (writeState is null)
            throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.");

        if (!lowerBoundInfinite)
            await WriteBound(async, writer, lowerBoundSize, writeState.LowerBoundWriteState, value.LowerBound!, cancellationToken).ConfigureAwait(false);

        if (!upperBoundInfinite)
            await WriteBound(async, writer, upperBoundSize, writeState.UpperBoundWriteState, value.UpperBound!, cancellationToken).ConfigureAwait(false);
    }

    // Writes one finite, non-null bound: an int32 length prefix followed by the subtype value.
    async ValueTask WriteBound(bool async, PgWriter writer, Size boundSize, object? boundWriteState, TSubtype bound, CancellationToken cancellationToken)
    {
        Debug.Assert(boundSize.Value != -1);
        if (boundSize.Kind is SizeKind.Unknown)
            throw new NotImplementedException();

        var byteCount = boundSize.Value; // Never -1 so it's a byteCount.
        if (writer.ShouldFlush(sizeof(int))) // Length
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteInt32(byteCount);
        using var _ = await writer.BeginNestedWrite(async, _subtypeRequirements.Write, byteCount,
            boundWriteState, cancellationToken).ConfigureAwait(false);
        if (async)
            await _subtypeConverter.WriteAsync(writer, bound, cancellationToken).ConfigureAwait(false);
        else
            _subtypeConverter.Write(writer, bound);
    }

    // Per-value state produced by GetSize and consumed by Write: cached bound sizes
    // (-1 marks a null bound) and the subtype converter's own nested write state.
    sealed class WriteState
    {
        internal Size LowerBoundSize { get; set; }
        internal object? LowerBoundWriteState { get; set; }
        internal Size UpperBoundSize { get; set; }
        internal object? UpperBoundWriteState { get; set; }
    }
}
/// <summary>
/// Reads PostgreSQL <c>record</c> values. Each field is resolved dynamically from its
/// type OID; the raw <see cref="object"/> array is either returned directly (for
/// <c>object[]</c> records) or mapped through <paramref name="factory"/> (e.g. to a value tuple).
/// Writing records is not supported.
/// </summary>
sealed class RecordConverter<T>(PgSerializerOptions options, Func<object[], T>? factory = null) : PgStreamingConverter<T>
{
    static bool IsObjectArrayRecord => typeof(T) == typeof(object[]);

    public override T Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<T> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    async ValueTask<T> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
        var fieldCount = reader.ReadInt32();
        var result = new object[fieldCount];
        for (var i = 0; i < fieldCount; i++)
        {
            // Each field is prefixed by its type OID (uint) and byte length (int).
            if (reader.ShouldBuffer(sizeof(uint) + sizeof(int)))
                await reader.Buffer(async, sizeof(uint) + sizeof(int), cancellationToken).ConfigureAwait(false);

            var typeOid = reader.ReadUInt32();
            var length = reader.ReadInt32();

            // Note that we leave .NET nulls in the object array rather than DBNull.
            if (length == -1)
                continue;

            var postgresType =
                options.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType()
                ?? throw new NotSupportedException($"Reading isn't supported for record field {i} (unknown type OID {typeOid})");
            var pgTypeId = options.ToCanonicalTypeId(postgresType);

            // TODO resolve based on types expected by _factory (pass in a Type[] during construction)
            // Only allow object polymorphism for object[] records; valuetuple records always have exact types.
            var typeInfo = (IsObjectArrayRecord ? options.GetTypeInfo(typeof(object), pgTypeId) : options.GetDefaultTypeInfo(pgTypeId))
                ?? throw new NotSupportedException(
                    $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}')");

            var concreteTypeInfo = typeInfo.MakeConcreteForField(Field.CreateUnspecified(pgTypeId));
            if (!concreteTypeInfo.SupportsReading)
                AdoSerializerHelpers.ThrowReadingNotSupported(IsObjectArrayRecord ? typeof(object) : null, options, pgTypeId, resolved: true);
            var binding = concreteTypeInfo.BindField(DataFormat.Binary);
            var scope = await reader.BeginNestedRead(async, length, binding.BufferRequirement, cancellationToken).ConfigureAwait(false);
            try
            {
                result[i] = await concreteTypeInfo.Converter.ReadAsObject(async, reader, cancellationToken).ConfigureAwait(false);
            }
            finally
            {
                if (async)
                    await scope.DisposeAsync().ConfigureAwait(false);
                else
                    scope.Dispose();
            }
        }

        return factory is null ? (T)(object)result : factory(result);
    }

    public override Size GetSize(SizeContext context, T value, ref object? writeState)
        => throw new NotSupportedException();

    public override void Write(PgWriter writer, T value)
        => throw new NotSupportedException();

    public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default)
        => throw new NotSupportedException();
}
writeState) + { + if (value.CanSeek) + return checked((int)(value.Length - value.Position)); + + var memoryStream = new MemoryStream(); + value.CopyTo(memoryStream); + writeState = memoryStream; + return checked((int)memoryStream.Length); + } + + public override void Write(PgWriter writer, Stream value) + { + if (writer.Current.WriteState is not null) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) + throw new InvalidOperationException(); + + writer.WriteBytes(writeStateSegment.AsSpan()); + return; + } + + // Non-derived MemoryStream fast path + if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) + writer.WriteBytes(segment.AsSpan((int)value.Position)); + else + value.CopyTo(writer.GetStream()); + } + + public override ValueTask WriteAsync(PgWriter writer, Stream value, CancellationToken cancellationToken = default) + { + if (writer.Current.WriteState is not null) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) + throw new InvalidOperationException(); + + return writer.WriteBytesAsync(writeStateSegment.AsMemory(), cancellationToken); + } + + // Non-derived MemoryStream fast path + if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) + { + return writer.WriteBytesAsync(segment.AsMemory((int)value.Position), cancellationToken); + } + else + { + return new ValueTask(value.CopyToAsync(writer.GetStream(), cancellationToken)); + } + } +} diff --git a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs new file mode 100644 index 0000000000..41e2cb83da --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs @@ -0,0 +1,91 @@ +using System; +using Npgsql.Properties; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class DateOnlyDateConverter(bool dateTimeInfinityConversions) : 
PgBufferedConverter +{ + static readonly DateOnly BaseValue = new(2000, 1, 1); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override DateOnly ReadCore(PgReader reader) + => reader.ReadInt32() switch + { + int.MaxValue => dateTimeInfinityConversions + ? DateOnly.MaxValue + : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), + int.MinValue => dateTimeInfinityConversions + ? DateOnly.MinValue + : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), + var value => BaseValue.AddDays(value) + }; + + protected override void WriteCore(PgWriter writer, DateOnly value) + { + if (dateTimeInfinityConversions) + { + if (value == DateOnly.MaxValue) + { + writer.WriteInt32(int.MaxValue); + return; + } + + if (value == DateOnly.MinValue) + { + writer.WriteInt32(int.MinValue); + return; + } + } + + writer.WriteInt32(value.DayNumber - BaseValue.DayNumber); + } +} + +sealed class DateTimeDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + static readonly DateTime BaseValue = new(2000, 1, 1, 0, 0, 0); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override DateTime ReadCore(PgReader reader) + => reader.ReadInt32() switch + { + int.MaxValue => dateTimeInfinityConversions + ? DateTime.MaxValue + : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), + int.MinValue => dateTimeInfinityConversions + ? 
DateTime.MinValue + : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), + var value => BaseValue + TimeSpan.FromDays(value) + }; + + protected override void WriteCore(PgWriter writer, DateTime value) + { + if (dateTimeInfinityConversions) + { + if (value == DateTime.MaxValue) + { + writer.WriteInt32(int.MaxValue); + return; + } + + if (value == DateTime.MinValue) + { + writer.WriteInt32(int.MinValue); + return; + } + } + + writer.WriteInt32((value.Date - BaseValue).Days); + } +} diff --git a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs new file mode 100644 index 0000000000..389c2ec021 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs @@ -0,0 +1,40 @@ +using System; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class DateTimeConverter(bool dateTimeInfinityConversions, DateTimeKind kind) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override DateTime ReadCore(PgReader reader) + => PgTimestamp.Decode(reader.ReadInt64(), kind, dateTimeInfinityConversions); + + protected override void WriteCore(PgWriter writer, DateTime value) + => writer.WriteInt64(PgTimestamp.Encode(value, dateTimeInfinityConversions)); +} + +sealed class DateTimeOffsetConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override DateTimeOffset ReadCore(PgReader reader) + => new(PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, dateTimeInfinityConversions), TimeSpan.Zero); 
+ + protected override void WriteCore(PgWriter writer, DateTimeOffset value) + { + if (value.Offset != TimeSpan.Zero) + throw new ArgumentException($"Cannot write DateTimeOffset with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', only offset 0 (UTC) is supported. ", nameof(value)); + + writer.WriteInt64(PgTimestamp.Encode(value.DateTime, dateTimeInfinityConversions)); + + } +} diff --git a/src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs b/src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs new file mode 100644 index 0000000000..34fdbf73ba --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs @@ -0,0 +1,169 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +delegate PgConcreteTypeInfo? DateTimeTypeInfoProviderDelegate( + DateTimeTypeInfoProvider provider, ProviderValueContext context, T? value, ref object? 
writeState); + +sealed class DateTimeTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgSerializerOptions _options; + readonly DateTimeTypeInfoProviderDelegate _provider; + readonly Func _factory; + readonly PgTypeId _timestampTz; + readonly PgConcreteTypeInfo _timestampTzConcreteTypeInfo; + readonly PgTypeId _timestamp; + readonly PgConcreteTypeInfo _timestampConcreteTypeInfo; + readonly bool _dateTimeInfinityConversions; + + internal DateTimeTypeInfoProvider(PgSerializerOptions options, DateTimeTypeInfoProviderDelegate provider, + Func factory, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + { + _options = options; + _provider = provider; + _factory = factory; + _timestampTz = timestampTz; + _timestamp = timestamp; + _dateTimeInfinityConversions = dateTimeInfinityConversions; + _timestampTzConcreteTypeInfo = new(options, factory(timestampTz), timestampTz); + _timestampConcreteTypeInfo = new(options, factory(timestamp), timestamp); + } + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + { + if (pgTypeId == _timestampTz) + return _timestampTzConcreteTypeInfo; + if (pgTypeId is null || pgTypeId == _timestamp) + return _timestampConcreteTypeInfo; + + throw new ArgumentOutOfRangeException(nameof(pgTypeId), pgTypeId, "Unsupported PgTypeId."); + } + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, T? value, ref object? writeState) + => _provider(this, context, value, ref writeState); + + public PgConcreteTypeInfo? Get(ProviderValueContext context, DateTime value, bool validateOnly = false) + { + Debug.Assert(!validateOnly || context.ExpectedPgTypeId is not null); + if (value.Kind is DateTimeKind.Utc) + { + // We coalesce with expectedPgTypeId to throw on unknown type ids. + return context.ExpectedPgTypeId == _timestamp + ? 
throw new ArgumentException( + string.Format(NpgsqlStrings.TimestampNoDateTimeUtc, + _options.GetDataTypeName(_timestamp).DisplayName, + _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value)) + : validateOnly ? null : GetDefault(context.ExpectedPgTypeId ?? _timestampTz); + } + + // For timestamptz types we'll accept unspecified MinValue/MaxValue as well. + if (context.ExpectedPgTypeId == _timestampTz + && !(_dateTimeInfinityConversions && (value == DateTime.MinValue || value == DateTime.MaxValue))) + { + throw new ArgumentException( + string.Format(NpgsqlStrings.TimestampTzNoDateTimeUnspecified, value.Kind, + _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value)); + } + + // We coalesce with expectedPgTypeId to throw on unknown type ids. + return validateOnly ? null : GetDefault(context.ExpectedPgTypeId ?? _timestamp); + } +} + +sealed class DateTimeTypeInfoProvider +{ + public static DateTimeTypeInfoProvider CreateProvider(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + => new(options, static (provider, context, value, ref writeState) => provider.Get(context, value), pgTypeId => + { + if (pgTypeId == timestampTz) + return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc); + if (pgTypeId == timestamp) + return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified); + + throw new NotSupportedException(); + }, timestampTz, timestamp, dateTimeInfinityConversions); + + public static DateTimeTypeInfoProvider> CreateRangeProvider( + PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + => new(options, static (provider, context, value, ref writeState) => + { + // Resolve both sides to make sure we end up with consistent PgTypeIds. + PgConcreteTypeInfo? 
concreteTypeInfo = null; + if (!value.LowerBoundInfinite) + { + concreteTypeInfo = provider.Get(context, value.LowerBound); + context = context with { ExpectedPgTypeId = concreteTypeInfo?.PgTypeId ?? context.ExpectedPgTypeId }; + } + + if (!value.UpperBoundInfinite) + { + var result = provider.Get(context, value.UpperBound, validateOnly: concreteTypeInfo is not null); + concreteTypeInfo ??= result; + } + + return concreteTypeInfo; + }, pgTypeId => + { + if (pgTypeId == timestampTz) + return new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc)); + if (pgTypeId == timestamp) + return new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified)); + + throw new NotSupportedException(); + }, timestampTz, timestamp, dateTimeInfinityConversions); + + public static DateTimeTypeInfoProvider CreateMultirangeProvider( + PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + where T : IList where TElement : notnull + { + if (typeof(TElement) != typeof(NpgsqlRange)) + ThrowHelper.ThrowNotSupportedException("Unsupported element type"); + + return new DateTimeTypeInfoProvider(options, static (provider, context, value, ref writeState) => + { + PgConcreteTypeInfo? concreteTypeInfo = null; + if (value is null) + return null; + + foreach (var element in (IList>)value) + { + PgConcreteTypeInfo? 
result; + if (!element.LowerBoundInfinite) + { + result = provider.Get(context, element.LowerBound, validateOnly: concreteTypeInfo is not null); + if (concreteTypeInfo is null && result is not null) + { + concreteTypeInfo = result; + context = context with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + } + if (!element.UpperBoundInfinite) + { + result = provider.Get(context, element.UpperBound, validateOnly: concreteTypeInfo is not null); + if (concreteTypeInfo is null && result is not null) + { + concreteTypeInfo = result; + context = context with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + } + } + return concreteTypeInfo; + }, pgTypeId => + { + if (pgTypeId == timestampTz) + return new MultirangeConverter( + (PgConverter)(object)new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc))); + if (pgTypeId == timestamp) + return new MultirangeConverter( + (PgConverter)(object)new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified))); + + throw new NotSupportedException(); + }, timestampTz, timestamp, dateTimeInfinityConversions); + } +} diff --git a/src/Npgsql/Internal/Converters/Temporal/IntervalConverters.cs b/src/Npgsql/Internal/Converters/Temporal/IntervalConverters.cs new file mode 100644 index 0000000000..1e1cbe9df2 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/IntervalConverters.cs @@ -0,0 +1,58 @@ +using System; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TimeSpanIntervalConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int)); + return format is DataFormat.Binary; + } + + protected override TimeSpan ReadCore(PgReader reader) + { + var microseconds = reader.ReadInt64(); + var days = reader.ReadInt32(); + 
var months = reader.ReadInt32(); + + return months > 0 + ? throw new InvalidCastException( + "Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.") + : new(microseconds * 10 + days * TimeSpan.TicksPerDay); + } + + protected override void WriteCore(PgWriter writer, TimeSpan value) + { + var ticksInDay = value.Ticks - TimeSpan.TicksPerDay * value.Days; + writer.WriteInt64(ticksInDay / 10); + writer.WriteInt32(value.Days); + writer.WriteInt32(0); + } +} + +sealed class NpgsqlIntervalConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int)); + return format is DataFormat.Binary; + } + + protected override NpgsqlInterval ReadCore(PgReader reader) + { + var ticks = reader.ReadInt64(); + var day = reader.ReadInt32(); + var month = reader.ReadInt32(); + return new NpgsqlInterval(month, day, ticks); + } + + protected override void WriteCore(PgWriter writer, NpgsqlInterval value) + { + writer.WriteInt64(value.Time); + writer.WriteInt32(value.Days); + writer.WriteInt32(value.Months); + } +} diff --git a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs new file mode 100644 index 0000000000..8bcca02db1 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs @@ -0,0 +1,60 @@ +using System; + +namespace Npgsql.Internal.Converters; + +sealed class LegacyDateTimeConverter(bool dateTimeInfinityConversions, bool timestamp) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) 
+ { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override DateTime ReadCore(PgReader reader) + { + if (timestamp) + { + return PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Unspecified, dateTimeInfinityConversions); + } + + var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, dateTimeInfinityConversions); + return (dateTime == DateTime.MinValue || dateTime == DateTime.MaxValue) && dateTimeInfinityConversions + ? dateTime + : dateTime.ToLocalTime(); + } + + protected override void WriteCore(PgWriter writer, DateTime value) + { + if (!timestamp && value.Kind is DateTimeKind.Local) + value = value.ToUniversalTime(); + + writer.WriteInt64(PgTimestamp.Encode(value, dateTimeInfinityConversions)); + } +} + +sealed class LegacyDateTimeOffsetConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override DateTimeOffset ReadCore(PgReader reader) + { + var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, dateTimeInfinityConversions); + + if (dateTimeInfinityConversions) + { + if (dateTime == DateTime.MinValue) + return DateTimeOffset.MinValue; + if (dateTime == DateTime.MaxValue) + return DateTimeOffset.MaxValue; + } + + return dateTime.ToLocalTime(); + } + + protected override void WriteCore(PgWriter writer, DateTimeOffset value) + => writer.WriteInt64(PgTimestamp.Encode(value.UtcDateTime, dateTimeInfinityConversions)); +} diff --git a/src/Npgsql/Internal/Converters/Temporal/PgTimestamp.cs b/src/Npgsql/Internal/Converters/Temporal/PgTimestamp.cs new file mode 100644 index 0000000000..6a44ccbdc9 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/PgTimestamp.cs @@ -0,0 +1,43 @@ +using System; + 
+namespace Npgsql.Internal.Converters; + +static class PgTimestamp +{ + const long PostgresTimestampOffsetTicks = 630822816000000000L; + + internal static long Encode(DateTime value, bool dateTimeInfinityConversions) + { + if (dateTimeInfinityConversions) + { + if (value.Ticks == DateTime.MaxValue.Ticks) + return long.MaxValue; + if (value.Ticks == DateTime.MinValue.Ticks) + return long.MinValue; + } + // Rounding here would cause problems because we would round up DateTime.MaxValue + // which would make it impossible to retrieve it back from the database, so we just drop the additional precision + return (value.Ticks - PostgresTimestampOffsetTicks) / 10; + } + + internal static DateTime Decode(long value, DateTimeKind kind, bool dateTimeInfinityConversions) + { + try + { + return value switch + { + long.MaxValue => dateTimeInfinityConversions + ? DateTime.MaxValue + : throw new InvalidCastException("Cannot read infinity value since DisableDateTimeInfinityConversions is true."), + long.MinValue => dateTimeInfinityConversions + ? 
DateTime.MinValue + : throw new InvalidCastException("Cannot read infinity value since DisableDateTimeInfinityConversions is true."), + _ => new(value * 10 + PostgresTimestampOffsetTicks, kind) + }; + } + catch (ArgumentOutOfRangeException e) + { + throw new InvalidCastException("Out of range of DateTime (year must be between 1 and 9999).", e); + } + } +} diff --git a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs new file mode 100644 index 0000000000..09385712bf --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs @@ -0,0 +1,50 @@ +using System; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TimeOnlyTimeConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + protected override TimeOnly ReadCore(PgReader reader) => new(reader.ReadInt64() * 10); + protected override void WriteCore(PgWriter writer, TimeOnly value) => writer.WriteInt64(value.Ticks / 10); +} + +sealed class TimeSpanTimeConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + protected override TimeSpan ReadCore(PgReader reader) => new(reader.ReadInt64() * 10); + protected override void WriteCore(PgWriter writer, TimeSpan value) => writer.WriteInt64(value.Ticks / 10); +} + +sealed class DateTimeOffsetTimeTzConverter : PgBufferedConverter +{ + // Binary Format: int64 expressing microseconds, int32 expressing timezone in seconds, negative + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = 
BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int)); + return format is DataFormat.Binary; + } + + protected override DateTimeOffset ReadCore(PgReader reader) + { + // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. + var ticks = reader.ReadInt64() * 10; + var offset = new TimeSpan(0, 0, -reader.ReadInt32()); + return new DateTimeOffset(ticks + TimeSpan.TicksPerDay, offset); + } + + protected override void WriteCore(PgWriter writer, DateTimeOffset value) + { + writer.WriteInt64(value.TimeOfDay.Ticks / 10); + writer.WriteInt32(-(int)(value.Offset.Ticks / TimeSpan.TicksPerSecond)); + } +} diff --git a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs new file mode 100644 index 0000000000..3dd6a5519b --- /dev/null +++ b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs @@ -0,0 +1,96 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +sealed class VersionPrefixedTextConverter(byte versionPrefix, PgConverter textConverter) + : PgStreamingConverter(textConverter.DbNullPredicateKind is DbNullPredicate.Custom) +{ + BufferRequirements _innerRequirements; + + protected override bool IsDbNullValue(T? value, object? writeState) => textConverter.IsDbNull(value, writeState); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => VersionPrefixedTextConverter.CanConvert(textConverter, format, out _innerRequirements, out bufferRequirements); + + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).Result; + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + public override Size GetSize(SizeContext context, [DisallowNull]T value, ref object? 
writeState) + => textConverter.GetSize(context, value, ref writeState).Combine(context.Format is DataFormat.Binary ? sizeof(byte) : 0); + + public override void Write(PgWriter writer, [DisallowNull]T value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, [DisallowNull]T value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + await VersionPrefixedTextConverter.ReadVersion(async, versionPrefix, reader, _innerRequirements.Read, cancellationToken).ConfigureAwait(false); + return async ? await textConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) : textConverter.Read(reader); + } + + async ValueTask Write(bool async, PgWriter writer, [DisallowNull]T value, CancellationToken cancellationToken) + { + await VersionPrefixedTextConverter.WriteVersion(async, versionPrefix, writer, cancellationToken).ConfigureAwait(false); + if (async) + await textConverter.WriteAsync(writer, value, cancellationToken).ConfigureAwait(false); + else + textConverter.Write(writer, value); + } +} + +static class VersionPrefixedTextConverter +{ + public static async ValueTask WriteVersion(bool async, byte version, PgWriter writer, CancellationToken cancellationToken) + { + if (writer.Current.Format is not DataFormat.Binary) + return; + + if (writer.ShouldFlush(sizeof(byte))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteByte(version); + } + + public static async ValueTask ReadVersion(bool async, byte expectedVersion, PgReader reader, Size textConverterReadRequirement, CancellationToken cancellationToken) + { + if (reader.Current.Format is not DataFormat.Binary) + return; + + if (!reader.IsResumed) + { + if (reader.ShouldBuffer(sizeof(byte))) + await reader.Buffer(async, sizeof(byte), 
cancellationToken).ConfigureAwait(false); + + var actualVersion = reader.ReadByte(); + if (actualVersion != expectedVersion) + throw new InvalidCastException($"Unknown wire format version: {actualVersion}"); + } + + var byteCount = BufferRequirements.GetMinimumBufferByteCount(textConverterReadRequirement, reader.CurrentRemaining); + if (reader.ShouldBuffer(byteCount)) + await reader.Buffer(async, byteCount, cancellationToken).ConfigureAwait(false); + } + + public static bool CanConvert(PgConverter textConverter, DataFormat format, out BufferRequirements textConverterRequirements, out BufferRequirements bufferRequirements) + { + var success = textConverter.CanConvert(format, out textConverterRequirements); + if (!success) + { + bufferRequirements = default; + return false; + } + if (textConverter.CanConvert(format is DataFormat.Binary ? DataFormat.Text : DataFormat.Binary, out var otherRequirements) && otherRequirements != textConverterRequirements) + throw new InvalidOperationException("Text converter should have identical requirements for text and binary formats."); + + bufferRequirements = format is DataFormat.Binary ? 
textConverterRequirements.Combine(sizeof(byte)) : textConverterRequirements; + + return success; + } +} diff --git a/src/Npgsql/Internal/DataFormat.cs b/src/Npgsql/Internal/DataFormat.cs new file mode 100644 index 0000000000..c52b418b7d --- /dev/null +++ b/src/Npgsql/Internal/DataFormat.cs @@ -0,0 +1,31 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public enum DataFormat : byte +{ + Binary, + Text +} + +static class DataFormatUtils +{ + public static DataFormat Create(short formatCode) + => formatCode switch + { + 0 => DataFormat.Text, + 1 => DataFormat.Binary, + _ => throw new ArgumentOutOfRangeException(nameof(formatCode), formatCode, "Unknown postgres format code, please file a bug,") + }; + + public static short ToFormatCode(this DataFormat dataFormat) + => dataFormat switch + { + DataFormat.Text => 0, + DataFormat.Binary => 1, + _ => throw new UnreachableException() + }; +} diff --git a/src/Npgsql/Internal/DbTypeResolverFactory.cs b/src/Npgsql/Internal/DbTypeResolverFactory.cs new file mode 100644 index 0000000000..55b3b71235 --- /dev/null +++ b/src/Npgsql/Internal/DbTypeResolverFactory.cs @@ -0,0 +1,9 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.DbTypeResolverExperimental)] +public abstract class DbTypeResolverFactory +{ + public abstract IDbTypeResolver CreateDbTypeResolver(NpgsqlDatabaseInfo databaseInfo); +} diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs new file mode 100644 index 0000000000..cdc6449331 --- /dev/null +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -0,0 +1,162 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] 
+[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver +{ + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is null) + return null; + + var context = GetMappings(type, dataTypeName.GetValueOrDefault(), options); + return context?.Find(type, dataTypeName.GetValueOrDefault(), options); + } + + protected static DynamicMappingCollection CreateCollection(TypeInfoMappingCollection? baseCollection = null) => new(baseCollection); + + protected static bool IsTypeOrNullableOfType(Type type, Func predicate, out Type matchedType) + { + matchedType = Nullable.GetUnderlyingType(type) ?? type; + return predicate(matchedType); + } + + protected static bool IsArrayLikeType(Type type, [NotNullWhen(true)]out Type? elementType) => TypeInfoMappingCollection.IsArrayLikeType(type, out elementType); + + protected static bool IsArrayDataTypeName(DataTypeName dataTypeName, PgSerializerOptions options, out DataTypeName elementDataTypeName) + { + if (options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType arrayType) + { + elementDataTypeName = arrayType.Element.DataTypeName; + return true; + } + + elementDataTypeName = default; + return false; + } + + protected abstract DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options); + + [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] + protected class DynamicMappingCollection + { + TypeInfoMappingCollection? _mappings; + + internal DynamicMappingCollection(TypeInfoMappingCollection? 
baseCollection = null) + { + if (baseCollection is not null) + _mappings = new(baseCollection); + } + + public DynamicMappingCollection AddMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) + { + if (type.IsValueType) + { + if (Nullable.GetUnderlyingType(type) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); + } + else + { + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); + } + return this; + } + + public DynamicMappingCollection AddArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) + { + if (elementType.IsValueType) + { + if (Nullable.GetUnderlyingType(elementType) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddStructArrayType), [typeof(string)])! + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } + else + { + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddArrayType), [typeof(string)])! 
+ .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } + return this; + } + + public DynamicMappingCollection AddProviderMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) + { + if (type.IsValueType) + { + if (Nullable.GetUnderlyingType(type) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); + } + else + { + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); + } + return this; + } + + public DynamicMappingCollection AddProviderArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) + { + if (elementType.IsValueType) + { + if (Nullable.GetUnderlyingType(elementType) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderStructArrayType), [typeof(string)])! + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } + else + { + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderArrayType), [typeof(string)])! 
+ .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } + return this; + } + + internal PgTypeInfo? Find(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => _mappings?.Find(type, dataTypeName, options); + + public TypeInfoMappingCollection ToTypeInfoMappingCollection() + => new(_mappings?.Items ?? Array.Empty()); + } +} diff --git a/src/Npgsql/Internal/IDbTypeResolver.cs b/src/Npgsql/Internal/IDbTypeResolver.cs new file mode 100644 index 0000000000..c4586a2bee --- /dev/null +++ b/src/Npgsql/Internal/IDbTypeResolver.cs @@ -0,0 +1,28 @@ +using System; +using System.Data; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +/// +/// An Npgsql resolver for DbType. Used by Npgsql to resolve a DbType to DataTypeName and back. +/// +[Experimental(NpgsqlDiagnostics.DbTypeResolverExperimental)] +public interface IDbTypeResolver +{ + /// + /// Attempts to resolve a DbType to a data type name. + /// + /// The DbType name to resolve. + /// The type of the value to resolve a data type name for. + /// The data type name if it could be mapped, the name can be non-normalized and without schema. + string? GetDataTypeName(DbType dbType, Type? type); + + /// + /// Attempts to resolve a data type name to a DbType. + /// + /// The data type name to map, in a normalized form but possibly without schema. + /// The DbType if it could be mapped, null otherwise. + DbType? 
GetDbType(DataTypeName dataTypeName); +} diff --git a/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs b/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs index ccdb7a8477..6d388c9a80 100644 --- a/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs +++ b/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs @@ -1,4 +1,5 @@ -using System.Threading.Tasks; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; using Npgsql.Util; namespace Npgsql.Internal; @@ -8,6 +9,7 @@ namespace Npgsql.Internal; /// and the types it contains. When first connecting to a database, Npgsql will attempt to load information /// about it via this factory. /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public interface INpgsqlDatabaseInfoFactory { /// @@ -19,4 +21,4 @@ public interface INpgsqlDatabaseInfoFactory /// database isn't of the correct type and isn't handled by this factory. /// Task Load(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async); -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/IPgTypeInfoResolver.cs b/src/Npgsql/Internal/IPgTypeInfoResolver.cs new file mode 100644 index 0000000000..b7b3ddc9ec --- /dev/null +++ b/src/Npgsql/Internal/IPgTypeInfoResolver.cs @@ -0,0 +1,21 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +/// +/// An Npgsql resolver for type info. Used by Npgsql to read and write values to PostgreSQL. +/// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public interface IPgTypeInfoResolver +{ + /// + /// Resolve a type info for a given type and data type name, at least one value will be non-null. + /// + /// The clr type being requested. + /// The postgres type being requested. + /// Used for configuration state and Npgsql type info or PostgreSQL type catalog lookups. + /// A result, or null if there was no match. + PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options); +} diff --git a/src/Npgsql/Internal/IntegratedSecurityHandler.cs b/src/Npgsql/Internal/IntegratedSecurityHandler.cs new file mode 100644 index 0000000000..7589cc59e8 --- /dev/null +++ b/src/Npgsql/Internal/IntegratedSecurityHandler.cs @@ -0,0 +1,38 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Npgsql.Properties; + +namespace Npgsql.Internal; + +class IntegratedSecurityHandler +{ + public virtual bool IsSupported => false; + + public virtual ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) + { + connectionLogger.LogDebug(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); + return new(); + } + + public virtual ValueTask NegotiateAuthentication(bool async, bool isKerberos, NpgsqlConnector connector, CancellationToken cancellationToken) + => throw new NotSupportedException(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); + + public virtual ValueTask GSSEncrypt(bool async, bool isRequired, NpgsqlConnector connector, CancellationToken cancellationToken) + => throw new NotSupportedException(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); +} + +sealed class RealIntegratedSecurityHandler : IntegratedSecurityHandler +{ + public override bool IsSupported => true; + + public override ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) + => KerberosUsernameProvider.GetUsername(async, includeRealm, connectionLogger, cancellationToken); + + public override ValueTask NegotiateAuthentication(bool async, bool isKerberos, NpgsqlConnector connector, CancellationToken cancellationToken) + => connector.AuthenticateGSS(async, isKerberos, 
cancellationToken); + + public override ValueTask GSSEncrypt(bool async, bool isRequired, NpgsqlConnector connector, CancellationToken cancellationToken) + => connector.GSSEncrypt(async, isRequired, cancellationToken); +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 02a8890dde..9d30a1dac8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -1,7 +1,6 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; -using System.Linq; using System.Net.Security; using System.Security.Cryptography; using System.Security.Cryptography.X509Certificates; @@ -19,131 +18,120 @@ partial class NpgsqlConnector { async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { - timeout.CheckAndApply(this); - var msg = ExpectAny(await ReadMessage(async), this); - switch (msg.AuthRequestType) + var requiredAuthModes = Settings.RequireAuthModes; + if (requiredAuthModes == default) + requiredAuthModes = NpgsqlConnectionStringBuilder.ParseAuthMode(PostgresEnvironment.RequireAuth); + + var authenticated = false; + + while (true) { - case AuthenticationRequestType.AuthenticationOk: - return; + timeout.CheckAndApply(this); + var msg = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); + switch (msg.AuthRequestType) + { + case AuthenticationRequestType.Ok: + // If we didn't complete authentication, check whether it's allowed + if (!authenticated) + { + // User requested GSS authentication, but server said that no auth is required + // If and only if our connection is gss encrypted, we consider us already authenticated + if (requiredAuthModes.HasFlag(RequireAuthMode.GSS) && IsGssEncrypted) + return; + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.None); + } + return; - case AuthenticationRequestType.AuthenticationCleartextPassword: - await AuthenticateCleartext(username, 
async, cancellationToken); - return; + case AuthenticationRequestType.CleartextPassword: + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.Password); + await AuthenticateCleartext(username, async, cancellationToken).ConfigureAwait(false); + break; - case AuthenticationRequestType.AuthenticationMD5Password: - await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken); - return; + case AuthenticationRequestType.MD5Password: + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.MD5); + await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken).ConfigureAwait(false); + break; - case AuthenticationRequestType.AuthenticationSASL: - await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, cancellationToken); - return; + case AuthenticationRequestType.SASL: + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.ScramSHA256); + await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, + cancellationToken).ConfigureAwait(false); + break; - case AuthenticationRequestType.AuthenticationGSS: - case AuthenticationRequestType.AuthenticationSSPI: - await AuthenticateGSS(async); - return; + case AuthenticationRequestType.GSS: + case AuthenticationRequestType.SSPI: + ThrowIfNotAllowed(requiredAuthModes, msg.AuthRequestType == AuthenticationRequestType.GSS ? 
RequireAuthMode.GSS : RequireAuthMode.SSPI); + var isKerberos = msg.AuthRequestType == AuthenticationRequestType.GSS; + await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, isKerberos, this, cancellationToken).ConfigureAwait(false); + return; + + case AuthenticationRequestType.GSSContinue: + throw new NpgsqlException("Can't start auth cycle with AuthenticationGSSContinue"); + + default: + throw new NotSupportedException($"Authentication method not supported (Received: {msg.AuthRequestType})"); + } - case AuthenticationRequestType.AuthenticationGSSContinue: - throw new NpgsqlException("Can't start auth cycle with AuthenticationGSSContinue"); + authenticated = true; + } - default: - throw new NotSupportedException($"Authentication method not supported (Received: {msg.AuthRequestType})"); + static void ThrowIfNotAllowed(RequireAuthMode requiredAuthModes, RequireAuthMode requestedAuthMode) + { + if (!requiredAuthModes.HasFlag(requestedAuthMode)) + throw new NpgsqlException($"\"{requestedAuthMode}\" authentication method is not allowed. 
Allowed methods: {requiredAuthModes}"); } } async Task AuthenticateCleartext(string username, bool async, CancellationToken cancellationToken = default) { - var passwd = await GetPassword(username, async, cancellationToken); - if (passwd == null) + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); + if (string.IsNullOrEmpty(passwd)) throw new NpgsqlException("No password has been provided but the backend requires one (in cleartext)"); var encoded = new byte[Encoding.UTF8.GetByteCount(passwd) + 1]; Encoding.UTF8.GetBytes(passwd, 0, passwd.Length, encoded, 0); - await WritePassword(encoded, async, cancellationToken); - await Flush(async, cancellationToken); - ExpectAny(await ReadMessage(async), this); + await WritePassword(encoded, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); } - async Task AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken = default) + async Task AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken) { // At the time of writing PostgreSQL only supports SCRAM-SHA-256 and SCRAM-SHA-256-PLUS - var supportsSha256 = mechanisms.Contains("SCRAM-SHA-256"); - var supportsSha256Plus = mechanisms.Contains("SCRAM-SHA-256-PLUS"); - if (!supportsSha256 && !supportsSha256Plus) + var serverSupportsSha256 = mechanisms.Contains("SCRAM-SHA-256"); + var allowSha256 = serverSupportsSha256 && Settings.ChannelBinding != ChannelBinding.Require; + var serverSupportsSha256Plus = mechanisms.Contains("SCRAM-SHA-256-PLUS"); + var allowSha256Plus = serverSupportsSha256Plus && Settings.ChannelBinding != ChannelBinding.Disable; + if (!allowSha256 && !allowSha256Plus) + { + if (serverSupportsSha256 && Settings.ChannelBinding == ChannelBinding.Require) + throw new NpgsqlException($"Couldn't connect because {nameof(ChannelBinding)} is set to {nameof(ChannelBinding.Require)} " + + "but 
the server doesn't support SCRAM-SHA-256-PLUS"); + if (serverSupportsSha256Plus && Settings.ChannelBinding == ChannelBinding.Disable) + throw new NpgsqlException($"Couldn't connect because {nameof(ChannelBinding)} is set to {nameof(ChannelBinding.Disable)} " + + "but the server doesn't support SCRAM-SHA-256"); + throw new NpgsqlException("No supported SASL mechanism found (only SCRAM-SHA-256 and SCRAM-SHA-256-PLUS are supported for now). " + "Mechanisms received from server: " + string.Join(", ", mechanisms)); + } var mechanism = string.Empty; var cbindFlag = string.Empty; var cbind = string.Empty; var successfulBind = false; - if (supportsSha256Plus) - { - var sslStream = (SslStream)_stream; - if (sslStream.RemoteCertificate is null) - { - ConnectionLogger.LogWarning("Remote certificate null, falling back to SCRAM-SHA-256"); - } - else - { - using var remoteCertificate = new X509Certificate2(sslStream.RemoteCertificate); - // Checking for hashing algorithms - HashAlgorithm? hashAlgorithm = null; - var algorithmName = remoteCertificate.SignatureAlgorithm.FriendlyName; - if (algorithmName is null) - { - ConnectionLogger.LogWarning("Signature algorithm was null, falling back to SCRAM-SHA-256"); - } - else if (algorithmName.StartsWith("sha1", StringComparison.OrdinalIgnoreCase) || - algorithmName.StartsWith("md5", StringComparison.OrdinalIgnoreCase) || - algorithmName.StartsWith("sha256", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA256.Create(); - } - else if (algorithmName.StartsWith("sha384", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA384.Create(); - } - else if (algorithmName.StartsWith("sha512", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA512.Create(); - } - else - { - ConnectionLogger.LogWarning( - $"Support for signature algorithm {algorithmName} is not yet implemented, falling back to SCRAM-SHA-256"); - } - - if (hashAlgorithm != null) - { - using var _ = hashAlgorithm; - - // RFC 5929 - mechanism = 
"SCRAM-SHA-256-PLUS"; - // PostgreSQL only supports tls-server-end-point binding - cbindFlag = "p=tls-server-end-point"; - // SCRAM-SHA-256-PLUS depends on using ssl stream, so it's fine - var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); - - var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); - var cbindBytes = cbindFlagBytes.Concat(certificateHash).ToArray(); - cbind = Convert.ToBase64String(cbindBytes); - successfulBind = true; - IsScramPlus = true; - } - } - } + if (allowSha256Plus) + DataSource.TransportSecurityHandler.AuthenticateSASLSha256Plus(this, ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); - if (!successfulBind && supportsSha256) + if (!successfulBind && allowSha256) { mechanism = "SCRAM-SHA-256"; // We can get here if PostgreSQL supports only SCRAM-SHA-256 or there was an error while binding to SCRAM-SHA-256-PLUS + // Or the user specifically requested to not use bindings // So, we set 'n' (client does not support binding) if there was an error while binding // or 'y' (client supports but server doesn't) in other case - cbindFlag = supportsSha256Plus ? "n" : "y"; - cbind = supportsSha256Plus ? "biws" : "eSws"; + cbindFlag = serverSupportsSha256Plus ? "n" : "y"; + cbind = serverSupportsSha256Plus ? "biws" : "eSws"; successfulBind = true; IsScram = true; } @@ -154,17 +142,18 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async throw new NpgsqlException("Unable to bind to SCRAM-SHA-256-PLUS, check logs for more information"); } - var passwd = await GetPassword(username, async, cancellationToken) ?? 
- throw new NpgsqlException($"No password has been provided but the backend requires one (in SASL/{mechanism})"); + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); + if (string.IsNullOrEmpty(passwd)) + throw new NpgsqlException($"No password has been provided but the backend requires one (in SASL/{mechanism})"); // Assumption: the write buffer is big enough to contain all our outgoing messages var clientNonce = GetNonce(); - await WriteSASLInitialResponse(mechanism, PGUtil.UTF8Encoding.GetBytes($"{cbindFlag},,n=*,r={clientNonce}"), async, cancellationToken); - await Flush(async, cancellationToken); + await WriteSASLInitialResponse(mechanism, NpgsqlWriteBuffer.UTF8Encoding.GetBytes($"{cbindFlag},,n=*,r={clientNonce}"), async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); - var saslContinueMsg = Expect(await ReadMessage(async), this); - if (saslContinueMsg.AuthRequestType != AuthenticationRequestType.AuthenticationSASLContinue) + var saslContinueMsg = Expect(await ReadMessage(async).ConfigureAwait(false), this); + if (saslContinueMsg.AuthRequestType != AuthenticationRequestType.SASLContinue) throw new NpgsqlException("[SASL] AuthenticationSASLContinue message expected"); var firstServerMsg = AuthenticationSCRAMServerFirstMessage.Load(saslContinueMsg.Payload, ConnectionLogger); if (!firstServerMsg.Nonce.StartsWith(clientNonce, StringComparison.Ordinal)) @@ -174,10 +163,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var saltedPassword = Hi(passwd.Normalize(NormalizationForm.FormKC), saltBytes, firstServerMsg.Iteration); var clientKey = HMAC(saltedPassword, "Client Key"); - byte[] storedKey; - using (var sha256 = SHA256.Create()) - storedKey = sha256.ComputeHash(clientKey); - + var storedKey = SHA256.HashData(clientKey); var clientFirstMessageBare = $"n=*,r={clientNonce}"; var serverFirstMessage = 
$"r={firstServerMsg.Nonce},s={firstServerMsg.Salt},i={firstServerMsg.Iteration}"; var clientFinalMessageWithoutProof = $"c={cbind},r={firstServerMsg.Nonce}"; @@ -193,21 +179,17 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var messageStr = $"{clientFinalMessageWithoutProof},p={clientProof}"; - await WriteSASLResponse(Encoding.UTF8.GetBytes(messageStr), async, cancellationToken); - await Flush(async, cancellationToken); + await WriteSASLResponse(Encoding.UTF8.GetBytes(messageStr), async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); - var saslFinalServerMsg = Expect(await ReadMessage(async), this); - if (saslFinalServerMsg.AuthRequestType != AuthenticationRequestType.AuthenticationSASLFinal) + var saslFinalServerMsg = Expect(await ReadMessage(async).ConfigureAwait(false), this); + if (saslFinalServerMsg.AuthRequestType != AuthenticationRequestType.SASLFinal) throw new NpgsqlException("[SASL] AuthenticationSASLFinal message expected"); var scramFinalServerMsg = AuthenticationSCRAMServerFinalMessage.Load(saslFinalServerMsg.Payload, ConnectionLogger); if (scramFinalServerMsg.ServerSignature != Convert.ToBase64String(serverSignature)) throw new NpgsqlException("[SCRAM] Unable to verify server signature"); - var okMsg = ExpectAny(await ReadMessage(async), this); - if (okMsg.AuthRequestType != AuthenticationRequestType.AuthenticationOk) - throw new NpgsqlException("[SASL] Expected AuthenticationOK message"); - static string GetNonce() { @@ -219,10 +201,78 @@ static string GetNonce() } } -#if NET6_0_OR_GREATER + internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindFlag, ref string cbind, + ref bool successfulBind) + { + // The check below is copied from libpq (with commentary) + // https://github.com/postgres/postgres/blob/98640f960eb9ed80cf90de3ef5d2e829b785b3eb/src/interfaces/libpq/fe-auth.c#L507-L517 + + // The server offered SCRAM-SHA-256-PLUS, but the 
connection + // is not SSL-encrypted. That's not sane. Perhaps SSL was + // stripped by a proxy? There's no point in continuing, + // because the server will reject the connection anyway if we + // try authenticate without channel binding even though both + // the client and server supported it. The SCRAM exchange + // checks for that, to prevent downgrade attacks. + if (!IsSslEncrypted) + throw new NpgsqlException("Server offered SCRAM-SHA-256-PLUS authentication over a non-SSL connection"); + + var sslStream = (SslStream)_stream; + if (sslStream.RemoteCertificate is null) + { + ConnectionLogger.LogWarning("Remote certificate null, falling back to SCRAM-SHA-256"); + return; + } + + // While SslStream.RemoteCertificate is X509Certificate2, it actually returns X509Certificate2 + // But to be on the safe side we'll just create a new instance of it + using var remoteCertificate = new X509Certificate2(sslStream.RemoteCertificate); + // Checking for hashing algorithms + var algorithmName = remoteCertificate.SignatureAlgorithm.FriendlyName; + + HashAlgorithm? 
hashAlgorithm = algorithmName switch + { + not null when algorithmName.StartsWith("sha1", StringComparison.OrdinalIgnoreCase) => SHA256.Create(), + not null when algorithmName.StartsWith("md5", StringComparison.OrdinalIgnoreCase) => SHA256.Create(), + not null when algorithmName.StartsWith("sha256", StringComparison.OrdinalIgnoreCase) => SHA256.Create(), + not null when algorithmName.StartsWith("sha384", StringComparison.OrdinalIgnoreCase) => SHA384.Create(), + not null when algorithmName.StartsWith("sha512", StringComparison.OrdinalIgnoreCase) => SHA512.Create(), + not null when algorithmName.StartsWith("sha3-256", StringComparison.OrdinalIgnoreCase) => SHA3_256.Create(), + not null when algorithmName.StartsWith("sha3-384", StringComparison.OrdinalIgnoreCase) => SHA3_384.Create(), + not null when algorithmName.StartsWith("sha3-512", StringComparison.OrdinalIgnoreCase) => SHA3_512.Create(), + + _ => null + }; + + if (hashAlgorithm is null) + { + ConnectionLogger.LogWarning( + algorithmName is null + ? 
"Signature algorithm was null, falling back to SCRAM-SHA-256" + : $"Support for signature algorithm {algorithmName} is not yet implemented, falling back to SCRAM-SHA-256"); + return; + } + + using var _ = hashAlgorithm; + + // RFC 5929 + mechanism = "SCRAM-SHA-256-PLUS"; + // PostgreSQL only supports tls-server-end-point binding + cbindFlag = "p=tls-server-end-point"; + // SCRAM-SHA-256-PLUS depends on using ssl stream, so it's fine + var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); + + var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); + var cbindBytes = new byte[cbindFlagBytes.Length + certificateHash.Length]; + cbindFlagBytes.CopyTo(cbindBytes, 0); + certificateHash.CopyTo(cbindBytes, cbindFlagBytes.Length); + cbind = Convert.ToBase64String(cbindBytes); + successfulBind = true; + IsScramPlus = true; + } + static byte[] Hi(string str, byte[] salt, int count) => Rfc2898DeriveBytes.Pbkdf2(str, salt, count, HashAlgorithmName.SHA256, 256 / 8); -#endif static byte[] Xor(byte[] buffer1, byte[] buffer2) { @@ -231,36 +281,31 @@ static byte[] Xor(byte[] buffer1, byte[] buffer2) return buffer1; } - static byte[] HMAC(byte[] data, string key) - { - using var hmacsha256 = new HMACSHA256(data); - return hmacsha256.ComputeHash(Encoding.UTF8.GetBytes(key)); - } + static byte[] HMAC(byte[] key, string data) => HMACSHA256.HashData(key, Encoding.UTF8.GetBytes(data)); async Task AuthenticateMD5(string username, byte[] salt, bool async, CancellationToken cancellationToken = default) { - var passwd = await GetPassword(username, async, cancellationToken); - if (passwd == null) + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); + if (string.IsNullOrEmpty(passwd)) throw new NpgsqlException("No password has been provided but the backend requires one (in MD5)"); byte[] result; - using (var md5 = MD5.Create()) { // First phase - var passwordBytes = PGUtil.UTF8Encoding.GetBytes(passwd); - var 
usernameBytes = PGUtil.UTF8Encoding.GetBytes(username); + var passwordBytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(passwd); + var usernameBytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(username); var cryptBuf = new byte[passwordBytes.Length + usernameBytes.Length]; passwordBytes.CopyTo(cryptBuf, 0); usernameBytes.CopyTo(cryptBuf, passwordBytes.Length); var sb = new StringBuilder(); - var hashResult = md5.ComputeHash(cryptBuf); + var hashResult = MD5.HashData(cryptBuf); foreach (var b in hashResult) sb.Append(b.ToString("x2")); var prehash = sb.ToString(); - var prehashbytes = PGUtil.UTF8Encoding.GetBytes(prehash); + var prehashbytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(prehash); cryptBuf = new byte[prehashbytes.Length + 4]; Array.Copy(salt, 0, cryptBuf, prehashbytes.Length, 4); @@ -269,55 +314,73 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio prehashbytes.CopyTo(cryptBuf, 0); sb = new StringBuilder("md5"); - hashResult = md5.ComputeHash(cryptBuf); + hashResult = MD5.HashData(cryptBuf); foreach (var b in hashResult) sb.Append(b.ToString("x2")); var resultString = sb.ToString(); result = new byte[Encoding.UTF8.GetByteCount(resultString) + 1]; Encoding.UTF8.GetBytes(resultString, 0, resultString.Length, result, 0); - result[result.Length - 1] = 0; + result[^1] = 0; } - await WritePassword(result, async, cancellationToken); - await Flush(async, cancellationToken); - ExpectAny(await ReadMessage(async), this); + await WritePassword(result, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); } -#if NET7_0_OR_GREATER - async Task AuthenticateGSS(bool async) + internal async ValueTask AuthenticateGSS(bool async, bool isKerberos, CancellationToken cancellationToken) { - if (!IntegratedSecurity) - throw new NpgsqlException("GSS/SSPI authentication but IntegratedSecurity not enabled"); - var targetName = $"{KerberosServiceName}/{Host}"; - - using var authContext = new 
NegotiateAuthentication(new NegotiateAuthenticationClientOptions{ TargetName = targetName}); + // See https://github.com/postgres/postgres/blob/a0dd0702e464f206b08c99a74cb58809c51aafa5/src/interfaces/libpq/fe-auth.c#L111-L123 + // We do not support delegation (TokenImpersonationLevel.Delegation) for now (#6540) + var clientOptions = new NegotiateAuthenticationClientOptions + { + TargetName = targetName, + RequireMutualAuthentication = true + }; + // If postgres requests GSS, we explicitly ask for Kerberos + // Instead of relying on SSPI on windows to pick the correct protocol (Kerberos instead of NTLM) + // Otherwise, leave Negotiate to allow SSPI to pick whatever it thinks is correct + // This behavior differs from libpq, which prefers SSPI to pick the protocol + // But mimics PGJDBC + // On UNIX only Kerberos is supported, so no need to differentiate between OSes + // TODO: PGJBC has a parameter to force SSPI. Not sure we need something like this. + if (isKerberos) + clientOptions.Package = "Kerberos"; + + NegotiateOptionsCallback?.Invoke(clientOptions); + + using var authContext = new NegotiateAuthentication(clientOptions); var data = authContext.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; - Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); - await WritePassword(data, 0, data.Length, async, UserCancellationToken); - await Flush(async, UserCancellationToken); + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) + { + // Unable to retrieve credentials or some other issue + throw new NpgsqlException($"Unable to authenticate with GSS: received {statusCode} instead of the expected ContinueNeeded or Completed"); + } + await WritePassword(data, 0, data.Length, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); while (true) { - var response = ExpectAny(await ReadMessage(async), this); - if 
(response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) + var response = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); + if (response.AuthRequestType == AuthenticationRequestType.Ok) break; - var gssMsg = response as AuthenticationGSSContinueMessage; - if (gssMsg == null) + if (response is not AuthenticationGSSContinueMessage gssMsg) throw new NpgsqlException($"Received unexpected authentication request message {response.AuthRequestType}"); - data = authContext.GetOutgoingBlob(gssMsg.AuthenticationData.AsSpan(), out statusCode)!; - if (statusCode == NegotiateAuthenticationStatusCode.Completed) + data = authContext.GetOutgoingBlob(gssMsg.AuthenticationData.AsSpan(), out statusCode); + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) + throw new NpgsqlException($"Error while authenticating GSS/SSPI: {statusCode}"); + // We might get NegotiateAuthenticationStatusCode.Completed but the data will not be null + // This can happen if it's the first cycle, in which case we have to send that data to complete handshake (#4888) + if (data is null) continue; - Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); - await WritePassword(data, 0, data.Length, async, UserCancellationToken); - await Flush(async, UserCancellationToken); + await WritePassword(data, 0, data.Length, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); } } -#endif async ValueTask GetPassword(string username, bool async, CancellationToken cancellationToken = default) { - var password = await DataSource.GetPassword(async, cancellationToken); + var password = await DataSource.GetPassword(async, cancellationToken).ConfigureAwait(false); if (password is not null) return password; diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index 
c61f7b48bd..b801b11b84 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -1,44 +1,45 @@ using System; using System.Collections.Generic; using System.Diagnostics; -using System.Linq; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; -using Npgsql.Util; -// ReSharper disable VariableHidesOuterVariable namespace Npgsql.Internal; partial class NpgsqlConnector { - internal Task WriteDescribe(StatementOrPortal statementOrPortal, string name, bool async, CancellationToken cancellationToken = default) + internal Task WriteDescribe(StatementOrPortal statementOrPortal, byte[] asciiName, bool async, CancellationToken cancellationToken = default) { - Debug.Assert(name.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(asciiName); var len = sizeof(byte) + // Message code sizeof(int) + // Length sizeof(byte) + // Statement or portal - (name.Length + 1); // Statement/portal name + (asciiName.Length + 1); // Statement/portal name - if (WriteBuffer.WriteSpaceLeft < len) - return FlushAndWrite(len, statementOrPortal, name, async, cancellationToken); + var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); + if (writeBuffer.WriteSpaceLeft < len) + return FlushAndWrite(len, statementOrPortal, asciiName, async, cancellationToken); - Write(len, statementOrPortal, name); + Write(writeBuffer, len, statementOrPortal, asciiName); return Task.CompletedTask; - async Task FlushAndWrite(int len, StatementOrPortal statementOrPortal, string name, bool async, CancellationToken cancellationToken) + async Task FlushAndWrite(int len, StatementOrPortal statementOrPortal, byte[] name, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the 
buffer ({WriteBuffer.WriteSpaceLeft})"); - Write(len, statementOrPortal, name); + Write(WriteBuffer, len, statementOrPortal, name); } - void Write(int len, StatementOrPortal statementOrPortal, string name) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void Write(NpgsqlWriteBuffer writeBuffer, int len, StatementOrPortal statementOrPortal, byte[] name) { - WriteBuffer.WriteByte(FrontendMessageCode.Describe); - WriteBuffer.WriteInt32(len - 1); - WriteBuffer.WriteByte((byte)statementOrPortal); - WriteBuffer.WriteNullTerminatedString(name); + writeBuffer.WriteByte(FrontendMessageCode.Describe); + writeBuffer.WriteInt32(len - 1); + writeBuffer.WriteByte((byte)statementOrPortal); + writeBuffer.WriteNullTerminatedString(name); } } @@ -47,23 +48,26 @@ internal Task WriteSync(bool async, CancellationToken cancellationToken = defaul const int len = sizeof(byte) + // Message code sizeof(int); // Length - if (WriteBuffer.WriteSpaceLeft < len) + var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); + if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(async, cancellationToken); - Write(); + Write(writeBuffer); return Task.CompletedTask; async Task FlushAndWrite(bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); - Write(); + Write(WriteBuffer); } - void Write() + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void Write(NpgsqlWriteBuffer writeBuffer) { - WriteBuffer.WriteByte(FrontendMessageCode.Sync); - WriteBuffer.WriteInt32(len - 1); + writeBuffer.WriteByte(FrontendMessageCode.Sync); + writeBuffer.WriteInt32(len - 1); } } @@ -76,31 +80,34 @@ internal Task WriteExecute(int maxRows, bool async, CancellationToken cancellati sizeof(byte) + // Null-terminated portal name 
(always empty for now) sizeof(int); // Max number of rows - if (WriteBuffer.WriteSpaceLeft < len) + var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); + if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(maxRows, async, cancellationToken); - Write(maxRows); + Write(writeBuffer, maxRows); return Task.CompletedTask; async Task FlushAndWrite(int maxRows, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(10 <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length 10 which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); - Write(maxRows); + Write(WriteBuffer, maxRows); } - void Write(int maxRows) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void Write(NpgsqlWriteBuffer writeBuffer, int maxRows) { - WriteBuffer.WriteByte(FrontendMessageCode.Execute); - WriteBuffer.WriteInt32(len - 1); - WriteBuffer.WriteByte(0); // Portal is always empty for now - WriteBuffer.WriteInt32(maxRows); + writeBuffer.WriteByte(FrontendMessageCode.Execute); + writeBuffer.WriteInt32(len - 1); + writeBuffer.WriteByte(0); // Portal is always empty for now + writeBuffer.WriteInt32(maxRows); } } - internal async Task WriteParse(string sql, string statementName, List inputParameters, bool async, CancellationToken cancellationToken = default) + internal async Task WriteParse(string sql, byte[] asciiName, List inputParameters, bool async, CancellationToken cancellationToken = default) { - Debug.Assert(statementName.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(asciiName); int queryByteLen; try @@ -113,71 +120,70 @@ internal async Task WriteParse(string sql, string statementName, List parameters, string portal, - string statement, + byte[] asciiName, bool allResultTypesAreUnknown, bool[]? 
unknownResultTypeList, bool async, CancellationToken cancellationToken = default) { - Debug.Assert(statement.All(c => c < 128)); - Debug.Assert(portal.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(asciiName); + NpgsqlWriteBuffer.AssertASCIIOnly(portal); var headerLength = sizeof(byte) + // Message code sizeof(int) + // Message length sizeof(byte) + // Portal is always empty (only a null terminator) - statement.Length + sizeof(byte) + // Statement name plus null terminator + asciiName.Length + sizeof(byte) + // Statement name plus null terminator sizeof(ushort); // Number of parameter format codes that follow - if (WriteBuffer.WriteSpaceLeft < headerLength) - { - Debug.Assert(WriteBuffer.Size >= headerLength, "Write buffer too small for Bind header"); - await Flush(async, cancellationToken); - } - + var writeBuffer = WriteBuffer; var formatCodesSum = 0; var paramsLength = 0; for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) { var param = parameters[paramIndex]; - formatCodesSum += (int)param.FormatCode; - param.LengthCache?.Rewind(); - paramsLength += param.ValidateAndGetLength(); + param.Bind(out var format, out var size); + paramsLength += size.Value > 0 ? size.Value : 0; + formatCodesSum += format.ToFormatCode(); } var formatCodeListLength = formatCodesSum == 0 ? 0 : formatCodesSum == parameters.Count ? 1 : parameters.Count; @@ -190,119 +196,139 @@ internal async Task WriteBind( sizeof(short) + // Number of result format codes sizeof(short) * (unknownResultTypeList?.Length ?? 
1); // Result format codes + WriteBuffer.StartMessage(messageLength); + if (WriteBuffer.WriteSpaceLeft < headerLength) + { + Debug.Assert(WriteBuffer.Size >= headerLength, "Write buffer too small for Bind header"); + await Flush(async, cancellationToken).ConfigureAwait(false); + } + WriteBuffer.WriteByte(FrontendMessageCode.Bind); WriteBuffer.WriteInt32(messageLength - 1); Debug.Assert(portal == string.Empty); - WriteBuffer.WriteByte(0); // Portal is always empty + writeBuffer.WriteByte(0); // Portal is always empty - WriteBuffer.WriteNullTerminatedString(statement); - WriteBuffer.WriteInt16(formatCodeListLength); + writeBuffer.WriteNullTerminatedString(asciiName); + writeBuffer.WriteInt16((short)formatCodeListLength); // 0 length implicitly means all-text, 1 means all-binary, >1 means mix-and-match if (formatCodeListLength == 1) { - if (WriteBuffer.WriteSpaceLeft < 2) - await Flush(async, cancellationToken); - WriteBuffer.WriteInt16((short)FormatCode.Binary); + if (writeBuffer.WriteSpaceLeft < sizeof(short)) + await Flush(async, cancellationToken).ConfigureAwait(false); + writeBuffer.WriteInt16(DataFormat.Binary.ToFormatCode()); } else if (formatCodeListLength > 1) { for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) { - if (WriteBuffer.WriteSpaceLeft < 2) - await Flush(async, cancellationToken); - WriteBuffer.WriteInt16((short)parameters[paramIndex].FormatCode); + if (writeBuffer.WriteSpaceLeft < sizeof(short)) + await Flush(async, cancellationToken).ConfigureAwait(false); + writeBuffer.WriteInt16(parameters[paramIndex].Format.ToFormatCode()); } } - if (WriteBuffer.WriteSpaceLeft < 2) - await Flush(async, cancellationToken); - - WriteBuffer.WriteUInt16((ushort)parameters.Count); + if (writeBuffer.WriteSpaceLeft < sizeof(ushort)) + await Flush(async, cancellationToken).ConfigureAwait(false); - for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) + writeBuffer.WriteUInt16((ushort)parameters.Count); + if (parameters.Count > 0) { - 
var param = parameters[paramIndex]; - param.LengthCache?.Rewind(); - await param.WriteWithLength(WriteBuffer, async, cancellationToken); + var writer = writeBuffer.GetWriter(DatabaseInfo, async ? FlushMode.NonBlocking : FlushMode.Blocking); + try + { + for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) + { + var param = parameters[paramIndex]; + await param.Write(async, writer, cancellationToken).ConfigureAwait(false); + } + } + catch(Exception ex) + { + Break(ex); + throw; + } } if (unknownResultTypeList != null) { - if (WriteBuffer.WriteSpaceLeft < 2 + unknownResultTypeList.Length * 2) - await Flush(async, cancellationToken); - WriteBuffer.WriteInt16(unknownResultTypeList.Length); + if (writeBuffer.WriteSpaceLeft < 2 + unknownResultTypeList.Length * 2) + await Flush(async, cancellationToken).ConfigureAwait(false); + writeBuffer.WriteInt16((short)unknownResultTypeList.Length); foreach (var t in unknownResultTypeList) - WriteBuffer.WriteInt16(t ? 0 : 1); + writeBuffer.WriteInt16((short)(t ? 0 : 1)); } else { - if (WriteBuffer.WriteSpaceLeft < 4) - await Flush(async, cancellationToken); - WriteBuffer.WriteInt16(1); - WriteBuffer.WriteInt16(allResultTypesAreUnknown ? 0 : 1); + if (writeBuffer.WriteSpaceLeft < 4) + await Flush(async, cancellationToken).ConfigureAwait(false); + writeBuffer.WriteInt16(1); + writeBuffer.WriteInt16((short)(allResultTypesAreUnknown ? 
0 : 1)); } } - internal Task WriteClose(StatementOrPortal type, string name, bool async, CancellationToken cancellationToken = default) + internal Task WriteClose(StatementOrPortal type, byte[] asciiName, bool async, CancellationToken cancellationToken = default) { var len = sizeof(byte) + // Message code sizeof(int) + // Length sizeof(byte) + // Statement or portal - name.Length + sizeof(byte); // Statement or portal name plus null terminator + asciiName.Length + sizeof(byte); // Statement or portal name plus null terminator - if (WriteBuffer.WriteSpaceLeft < 10) - return FlushAndWrite(len, type, name, async, cancellationToken); + var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); + if (writeBuffer.WriteSpaceLeft < len) + return FlushAndWrite(len, type, asciiName, async, cancellationToken); - Write(len, type, name); + Write(writeBuffer, len, type, asciiName); return Task.CompletedTask; - async Task FlushAndWrite(int len, StatementOrPortal type, string name, bool async, CancellationToken cancellationToken) + async Task FlushAndWrite(int len, StatementOrPortal type, byte[] name, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); - Write(len, type, name); + Write(WriteBuffer, len, type, name); } - void Write(int len, StatementOrPortal type, string name) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void Write(NpgsqlWriteBuffer writeBuffer, int len, StatementOrPortal type, byte[] name) { - WriteBuffer.WriteByte(FrontendMessageCode.Close); - WriteBuffer.WriteInt32(len - 1); - WriteBuffer.WriteByte((byte)type); - WriteBuffer.WriteNullTerminatedString(name); + writeBuffer.WriteByte(FrontendMessageCode.Close); + writeBuffer.WriteInt32(len - 1); + 
writeBuffer.WriteByte((byte)type); + writeBuffer.WriteNullTerminatedString(name); } } - internal void WriteQuery(string sql) => WriteQuery(sql, false).GetAwaiter().GetResult(); - internal async Task WriteQuery(string sql, bool async, CancellationToken cancellationToken = default) { var queryByteLen = TextEncoding.GetByteCount(sql); + var len = sizeof(byte) + + sizeof(int) + // Message length (including self excluding code) + queryByteLen + // Query byte length + sizeof(byte); + + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < 1 + 4) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.Query); - WriteBuffer.WriteInt32( - sizeof(int) + // Message length (including self excluding code) - queryByteLen + // Query byte length - sizeof(byte)); // Null terminator + WriteBuffer.WriteInt32(len - 1); - await WriteBuffer.WriteString(sql, queryByteLen, async, cancellationToken); + await WriteBuffer.WriteString(sql, queryByteLen, async, cancellationToken).ConfigureAwait(false); if (WriteBuffer.WriteSpaceLeft < 1) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(0); // Null terminator } - internal void WriteCopyDone() => WriteCopyDone(false).GetAwaiter().GetResult(); - internal async Task WriteCopyDone(bool async, CancellationToken cancellationToken = default) { const int len = sizeof(byte) + // Message code sizeof(int); // Length + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.CopyDone); WriteBuffer.WriteInt32(len - 1); @@ -316,8 +342,9 @@ internal async Task WriteCopyFail(bool async, CancellationToken cancellationToke sizeof(int) + // Length sizeof(byte); // Error message is always empty (only a null terminator) + 
WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.CopyFail); WriteBuffer.WriteInt32(len - 1); @@ -333,6 +360,7 @@ internal void WriteCancelRequest(int backendProcessId, int backendSecretKey) Debug.Assert(backendProcessId != 0); + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) Flush(false).GetAwaiter().GetResult(); @@ -347,6 +375,7 @@ internal void WriteTerminate() const int len = sizeof(byte) + // Message code sizeof(int); // Length + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) Flush(false).GetAwaiter().GetResult(); @@ -359,6 +388,7 @@ internal void WriteSslRequest() const int len = sizeof(int) + // Length sizeof(int); // SSL request code + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) Flush(false).GetAwaiter().GetResult(); @@ -366,6 +396,19 @@ internal void WriteSslRequest() WriteBuffer.WriteInt32(80877103); } + internal void WriteGSSEncryptRequest() + { + const int len = sizeof(int) + // Length + sizeof(int); // GSSEnc request code + + WriteBuffer.StartMessage(len); + if (WriteBuffer.WriteSpaceLeft < len) + Flush(false).GetAwaiter().GetResult(); + + WriteBuffer.WriteInt32(len); + WriteBuffer.WriteInt32(80877104); + } + internal void WriteStartup(Dictionary parameters) { const int protocolVersion3 = 3 << 16; // 196608 @@ -375,10 +418,11 @@ internal void WriteStartup(Dictionary parameters) sizeof(byte); // Trailing zero byte foreach (var kvp in parameters) - len += PGUtil.UTF8Encoding.GetByteCount(kvp.Key) + 1 + - PGUtil.UTF8Encoding.GetByteCount(kvp.Value) + 1; + len += NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(kvp.Key) + 1 + + NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(kvp.Value) + 1; // Should really never happen, just in case + WriteBuffer.StartMessage(len); if (len > WriteBuffer.Size) throw new Exception("Startup message 
bigger than buffer"); @@ -402,8 +446,10 @@ internal void WriteStartup(Dictionary parameters) internal async Task WritePassword(byte[] payload, int offset, int count, bool async, CancellationToken cancellationToken = default) { + WriteBuffer.StartMessage(sizeof(byte) + sizeof(int) + count); if (WriteBuffer.WriteSpaceLeft < sizeof(byte) + sizeof(int)) - await WriteBuffer.Flush(async, cancellationToken); + await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); + WriteBuffer.WriteByte(FrontendMessageCode.Password); WriteBuffer.WriteInt32(sizeof(int) + count); @@ -414,20 +460,21 @@ internal async Task WritePassword(byte[] payload, int offset, int count, bool as return; } - await WriteBuffer.Flush(async, cancellationToken); - await WriteBuffer.DirectWrite(new ReadOnlyMemory(payload, offset, count), async, cancellationToken); + await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); + await WriteBuffer.DirectWrite(new ReadOnlyMemory(payload, offset, count), async, cancellationToken).ConfigureAwait(false); } internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialResponse, bool async, CancellationToken cancellationToken = default) { var len = sizeof(byte) + // Message code sizeof(int) + // Length - PGUtil.UTF8Encoding.GetByteCount(mechanism) + sizeof(byte) + // Mechanism plus null terminator + NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(mechanism) + sizeof(byte) + // Mechanism plus null terminator sizeof(int) + // Initial response length (initialResponse?.Length ?? 
0); // Initial response payload + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) - await WriteBuffer.Flush(async, cancellationToken); + await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.Password); WriteBuffer.WriteInt32(len - 1); @@ -449,6 +496,7 @@ internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialRes internal Task WritePregenerated(byte[] data, bool async = false, CancellationToken cancellationToken = default) { + WriteBuffer.StartMessage(data.Length); if (WriteBuffer.WriteSpaceLeft < data.Length) return FlushAndWrite(data, async, cancellationToken); @@ -457,7 +505,7 @@ internal Task WritePregenerated(byte[] data, bool async = false, CancellationTok async Task FlushAndWrite(byte[] data, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(data.Length <= WriteBuffer.WriteSpaceLeft, $"Pregenerated message has length {data.Length} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); WriteBuffer.WriteBytes(data, 0, data.Length); } @@ -466,4 +514,4 @@ async Task FlushAndWrite(byte[] data, bool async, CancellationToken cancellation internal void Flush() => WriteBuffer.Flush(false).GetAwaiter().GetResult(); internal Task Flush(bool async, CancellationToken cancellationToken = default) => WriteBuffer.Flush(async, cancellationToken); -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs deleted file mode 100644 index 9b8afffadd..0000000000 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ /dev/null @@ -1,179 +0,0 @@ -using System; -using System.IO; -using System.Net; -using System.Net.Security; -using System.Security.Cryptography; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; 
-using static Npgsql.Util.Statics; - -namespace Npgsql.Internal; - - -partial class NpgsqlConnector -{ -#if !NET6_0_OR_GREATER - static byte[] Hi(string str, byte[] salt, int count) - { - using var hmac = new HMACSHA256(Encoding.UTF8.GetBytes(str)); - var salt1 = new byte[salt.Length + 4]; - byte[] hi, u1; - - Buffer.BlockCopy(salt, 0, salt1, 0, salt.Length); - salt1[salt1.Length - 1] = 1; - - hi = u1 = hmac.ComputeHash(salt1); - - for (var i = 1; i < count; i++) - { - var u2 = hmac.ComputeHash(u1); - NpgsqlConnector.Xor(hi, u2); - u1 = u2; - } - - return hi; - } -#endif - -#if !NET7_0_OR_GREATER - async Task AuthenticateGSS(bool async) - { - if (!IntegratedSecurity) - throw new NpgsqlException("GSS/SSPI authentication but IntegratedSecurity not enabled"); - - var targetName = $"{KerberosServiceName}/{Host}"; - - using var negotiateStream = new NegotiateStream(new GSSPasswordMessageStream(this), true); - try - { - if (async) - await negotiateStream.AuthenticateAsClientAsync(CredentialCache.DefaultNetworkCredentials, targetName); - else - negotiateStream.AuthenticateAsClient(CredentialCache.DefaultNetworkCredentials, targetName); - } - catch (AuthenticationCompleteException) - { - return; - } - catch (IOException e) when (e.InnerException is AuthenticationCompleteException) - { - return; - } - catch (IOException e) when (e.InnerException is PostgresException) - { - throw e.InnerException; - } - - throw new NpgsqlException("NegotiateStream.AuthenticateAsClient completed unexpectedly without signaling success"); - } - - /// - /// This Stream is placed between NegotiateStream and the socket's NetworkStream (or SSLStream). It intercepts - /// traffic and performs the following operations: - /// * Outgoing messages are framed in PostgreSQL's PasswordMessage, and incoming are stripped of it. - /// * NegotiateStream frames payloads with a 5-byte header, which PostgreSQL doesn't understand. This header is - /// stripped from outgoing messages and added to incoming ones. 
- /// - /// - /// See https://referencesource.microsoft.com/#System/net/System/Net/_StreamFramer.cs,16417e735f0e9530,references - /// - sealed class GSSPasswordMessageStream : Stream - { - readonly NpgsqlConnector _connector; - int _leftToWrite; - int _leftToRead, _readPos; - byte[]? _readBuf; - - internal GSSPasswordMessageStream(NpgsqlConnector connector) - => _connector = connector; - - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - => Write(buffer, offset, count, true, cancellationToken); - - public override void Write(byte[] buffer, int offset, int count) - => Write(buffer, offset, count, false).GetAwaiter().GetResult(); - - async Task Write(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - if (_leftToWrite == 0) - { - // We're writing the frame header, which contains the payload size. - _leftToWrite = (buffer[3] << 8) | buffer[4]; - - buffer[0] = 22; - if (buffer[1] != 1) - throw new NotSupportedException($"Received frame header major v {buffer[1]} (different from 1)"); - if (buffer[2] != 0) - throw new NotSupportedException($"Received frame header minor v {buffer[2]} (different from 0)"); - - // In case of payload data in the same buffer just after the frame header - if (count == 5) - return; - count -= 5; - offset += 5; - } - - if (count > _leftToWrite) - throw new NpgsqlException($"NegotiateStream trying to write {count} bytes but according to frame header we only have {_leftToWrite} left!"); - await _connector.WritePassword(buffer, offset, count, async, cancellationToken); - await _connector.Flush(async, cancellationToken); - _leftToWrite -= count; - } - - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - => Read(buffer, offset, count, true, cancellationToken); - - public override int Read(byte[] buffer, int offset, int count) - => Read(buffer, offset, count, 
false).GetAwaiter().GetResult(); - - async Task Read(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - if (_leftToRead == 0) - { - var response = ExpectAny(await _connector.ReadMessage(async), _connector); - if (response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) - throw new AuthenticationCompleteException(); - var gssMsg = response as AuthenticationGSSContinueMessage; - if (gssMsg == null) - throw new NpgsqlException($"Received unexpected authentication request message {response.AuthRequestType}"); - _readBuf = gssMsg.AuthenticationData; - _leftToRead = gssMsg.AuthenticationData.Length; - _readPos = 0; - buffer[0] = 22; - buffer[1] = 1; - buffer[2] = 0; - buffer[3] = (byte)((_leftToRead >> 8) & 0xFF); - buffer[4] = (byte)(_leftToRead & 0xFF); - return 5; - } - - if (count > _leftToRead) - throw new NpgsqlException($"NegotiateStream trying to read {count} bytes but according to frame header we only have {_leftToRead} left!"); - count = Math.Min(count, _leftToRead); - Array.Copy(_readBuf!, _readPos, buffer, offset, count); - _leftToRead -= count; - return count; - } - - public override void Flush() { } - - public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); - public override void SetLength(long value) => throw new NotSupportedException(); - - public override bool CanRead => true; - public override bool CanWrite => true; - public override bool CanSeek => false; - public override long Length => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - } - - sealed class AuthenticationCompleteException : Exception { } -#endif -} diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index cd1ef203bb..cedcfeac8b 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ 
-1,10 +1,11 @@ using System; using System.Buffers; +using System.Buffers.Binary; using System.Collections.Generic; using System.Data; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; -using System.Linq; using System.Net; using System.Net.Security; using System.Net.Sockets; @@ -15,23 +16,22 @@ using System.Security.Cryptography.X509Certificates; using System.Text; using System.Threading; -using System.Threading.Channels; using System.Threading.Tasks; using Npgsql.BackendMessages; -using Npgsql.TypeMapping; using Npgsql.Util; -using static Npgsql.Util.Statics; -using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Properties; +using static Npgsql.Util.Statics; + namespace Npgsql.Internal; /// /// Represents a connection to a PostgreSQL backend. Unlike NpgsqlConnection objects, which are /// exposed to users, connectors are internal to Npgsql and are recycled by the connection pool. /// -public sealed partial class NpgsqlConnector : IDisposable +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public sealed partial class NpgsqlConnector { #region Fields and Properties @@ -55,12 +55,14 @@ public sealed partial class NpgsqlConnector : IDisposable /// public NpgsqlConnectionStringBuilder Settings { get; } - Action? ClientCertificatesCallback { get; } - RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } + Action? SslClientAuthenticationOptionsCallback { get; } + #pragma warning disable CS0618 // ProvidePasswordCallback is obsolete ProvidePasswordCallback? ProvidePasswordCallback { get; } #pragma warning restore CS0618 + Action? NegotiateOptionsCallback { get; } + public Encoding TextEncoding { get; private set; } = default!; /// @@ -96,6 +98,17 @@ public sealed partial class NpgsqlConnector : IDisposable /// internal int BackendProcessId { get; private set; } + string? 
_inferredUserName; + + /// + /// The user name that has been inferred when the connector was opened + /// + internal string InferredUserName + { + get => _inferredUserName ?? throw new InvalidOperationException($"{nameof(InferredUserName)} cannot be accessed before the connector has been opened."); + private set => _inferredUserName = value; + } + bool SupportsPostgresCancellation => BackendProcessId != 0; /// @@ -103,12 +116,14 @@ public sealed partial class NpgsqlConnector : IDisposable /// internal int Id => BackendProcessId; + internal NpgsqlDataSource.ReloadableState ReloadableState = null!; + /// /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). /// - public NpgsqlDatabaseInfo DatabaseInfo { get; internal set; } = default!; - - internal TypeMapper TypeMapper { get; set; } = default!; + public NpgsqlDatabaseInfo DatabaseInfo => ReloadableState.DatabaseInfo; + internal PgSerializerOptions SerializerOptions => ReloadableState.SerializerOptions; + internal IDbTypeResolver? DbTypeResolver => ReloadableState.DbTypeResolver; /// /// The current transaction status for this connector. @@ -136,6 +151,13 @@ public sealed partial class NpgsqlConnector : IDisposable /// internal int PendingPrependedResponses { get; set; } + /// + /// A ManualResetEventSlim used to make sure a cancellation request doesn't run + /// while we're reading responses for the prepended query + /// as we can't gracefully handle their cancellation. + /// + readonly ManualResetEventSlim ReadingPrependedMessagesMRE = new(initialState: true); + internal NpgsqlDataReader? CurrentReader; internal PreparedStatementManager PreparedStatementManager { get; } @@ -156,7 +178,7 @@ public sealed partial class NpgsqlConnector : IDisposable /// /// Holds all run-time parameters in raw, binary format for efficient handling without allocations. 
/// - readonly List<(byte[] Name, byte[] Value)> _rawParameters = new(); + readonly List<(byte[] Name, byte[] Value)> _rawParameters = []; /// /// If this connector was broken, this contains the exception that caused the break. @@ -164,50 +186,24 @@ public sealed partial class NpgsqlConnector : IDisposable volatile Exception? _breakReason; /// - /// - /// Used by the pool to indicate that I/O is currently in progress on this connector, so that another write - /// isn't started concurrently. Note that since we have only one write loop, this is only ever usedto - /// protect against an over-capacity writes into a connector that's currently *asynchronously* writing. - /// - /// - /// It is guaranteed that the currently-executing - /// Specifically, reading may occur - and the connector may even be returned to the pool - before this is - /// released. - /// + /// A lock that's taken while a cancellation is being delivered; new queries are blocked until the + /// cancellation is delivered. This reduces the chance that a cancellation meant for a previous + /// command will accidentally cancel a later one, see #615. /// - internal volatile int MultiplexAsyncWritingLock; - - /// - internal void FlagAsNotWritableForMultiplexing() - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(CommandsInFlightCount > 0 || IsBroken || IsClosed, - $"About to mark multiplexing connector as non-writable, but {nameof(CommandsInFlightCount)} is {CommandsInFlightCount}"); - - Interlocked.Exchange(ref MultiplexAsyncWritingLock, 1); - } - - /// - internal void FlagAsWritableForMultiplexing() - { - Debug.Assert(Settings.Multiplexing); - if (Interlocked.CompareExchange(ref MultiplexAsyncWritingLock, 0, 1) != 1) - throw new Exception("Multiplexing lock was not taken when releasing. Please report a bug."); - } + object CancelLock { get; } = new(); /// - /// The timeout for reading messages that are part of the user's command - /// (i.e. which aren't internal prepended commands). 
+ /// A lock that's taken to make sure no other concurrent operation is running. + /// Break takes it to set the state of the connector. + /// Anyone else should immediately check the state and exit + /// if the connector is closed. /// - /// Precision is milliseconds - internal int UserTimeout { private get; set; } + object SyncObj { get; } = new(); /// - /// A lock that's taken while a cancellation is being delivered; new queries are blocked until the - /// cancellation is delivered. This reduces the chance that a cancellation meant for a previous - /// command will accidentally cancel a later one, see #615. + /// A lock that's used to wait for the Cleanup to complete while breaking the connection. /// - internal object CancelLock { get; } + object CleanupLock { get; } = new(); readonly bool _isKeepAliveEnabled; readonly Timer? _keepAliveTimer; @@ -249,7 +245,15 @@ internal bool PostgresCancellationPerformed internal bool UserCancellationRequested => _userCancellationRequested; internal CancellationToken UserCancellationToken { get; set; } internal bool AttemptPostgresCancellation { get; private set; } - static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.FromMilliseconds(-1); + static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.Zero; + + static readonly SslApplicationProtocol _alpnProtocol = new("postgresql"); + +#pragma warning disable CA1859 + // We're casting to IDisposable to not explicitly reference X509Certificate2 for NativeAOT + // TODO: probably pointless now, needs to be rechecked + List? 
_certificates; +#pragma warning restore CA1859 internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } @@ -285,7 +289,7 @@ internal bool PostgresCancellationPerformed readonly ReadyForQueryMessage _readyForQueryMessage = new(); readonly ParameterDescriptionMessage _parameterDescriptionMessage = new(); readonly DataRowMessage _dataRowMessage = new(); - readonly RowDescriptionMessage _rowDescriptionMessage = new(); + readonly RowDescriptionMessage _rowDescriptionMessage = new(connectorOwned: true); // Since COPY is rarely used, allocate these lazily CopyInResponseMessage? _copyInResponseMessage; @@ -304,12 +308,34 @@ internal bool PostgresCancellationPerformed internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) : this(dataSource) { - if (conn.ProvideClientCertificatesCallback is not null) - ClientCertificatesCallback = certs => conn.ProvideClientCertificatesCallback(certs); - if (conn.UserCertificateValidationCallback is not null) - UserCertificateValidationCallback = conn.UserCertificateValidationCallback; - + var sslClientAuthenticationOptionsCallback = conn.SslClientAuthenticationOptionsCallback; #pragma warning disable CS0618 // Obsolete + var provideClientCertificatesCallback = conn.ProvideClientCertificatesCallback; + var userCertificateValidationCallback = conn.UserCertificateValidationCallback; + if (provideClientCertificatesCallback is not null || + userCertificateValidationCallback is not null) + { + if (sslClientAuthenticationOptionsCallback is not null) + throw new NotSupportedException(NpgsqlStrings.SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported); + + sslClientAuthenticationOptionsCallback = options => + { + if (provideClientCertificatesCallback is not null) + { + options.ClientCertificates ??= new X509Certificate2Collection(); + provideClientCertificatesCallback.Invoke(options.ClientCertificates); + } + + if (userCertificateValidationCallback is not null) + { + 
options.RemoteCertificateValidationCallback = userCertificateValidationCallback; + } + }; + } + + if (sslClientAuthenticationOptionsCallback is not null) + SslClientAuthenticationOptionsCallback = sslClientAuthenticationOptionsCallback; + ProvidePasswordCallback = conn.ProvidePasswordCallback; #pragma warning restore CS0618 } @@ -317,8 +343,7 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) NpgsqlConnector(NpgsqlConnector connector) : this(connector.DataSource) { - ClientCertificatesCallback = connector.ClientCertificatesCallback; - UserCertificateValidationCallback = connector.UserCertificateValidationCallback; + SslClientAuthenticationOptionsCallback = connector.SslClientAuthenticationOptionsCallback; ProvidePasswordCallback = connector.ProvidePasswordCallback; } @@ -334,42 +359,25 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) TransactionLogger = LoggingConfiguration.TransactionLogger; CopyLogger = LoggingConfiguration.CopyLogger; - ClientCertificatesCallback = dataSource.ClientCertificatesCallback; - UserCertificateValidationCallback = dataSource.UserCertificateValidationCallback; + SslClientAuthenticationOptionsCallback = dataSource.SslClientAuthenticationOptionsCallback; + NegotiateOptionsCallback = dataSource.Configuration.NegotiateOptionsCallback; State = ConnectorState.Closed; TransactionStatus = TransactionStatus.Idle; Settings = dataSource.Settings; PostgresParameters = new Dictionary(); - CancelLock = new object(); - _isKeepAliveEnabled = Settings.KeepAlive > 0; if (_isKeepAliveEnabled) - _keepAliveTimer = new Timer(PerformKeepAlive, null, Timeout.Infinite, Timeout.Infinite); - + { + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + _keepAliveTimer = new Timer(PerformKeepAlive, null, Timeout.Infinite, Timeout.Infinite); + } + DataReader = new NpgsqlDataReader(this); // TODO: Not just for 
automatic preparation anymore... PreparedStatementManager = new PreparedStatementManager(this); - - if (Settings.Multiplexing) - { - // Note: It's OK for this channel to be unbounded: each command enqueued to it is accompanied by sending - // it to PostgreSQL. If we overload it, a TCP zero window will make us block on the networking side - // anyway. - // Note: the in-flight channel can probably be single-writer, but that doesn't actually do anything - // at this point. And we currently rely on being able to complete the channel at any point (from - // Break). We may want to revisit this if an optimized, SingleWriter implementation is introduced. - var commandsInFlightChannel = Channel.CreateUnbounded( - new UnboundedChannelOptions { SingleReader = true }); - CommandsInFlightReader = commandsInFlightChannel.Reader; - CommandsInFlightWriter = commandsInFlightChannel.Writer; - - // TODO: Properly implement this - if (_isKeepAliveEnabled) - throw new NotImplementedException("Keepalive not yet implemented for multiplexing"); - } } #endregion @@ -381,27 +389,6 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) internal string Database => Settings.Database!; string KerberosServiceName => Settings.KerberosServiceName; int ConnectionTimeout => Settings.Timeout; - bool IntegratedSecurity => Settings.IntegratedSecurity; - - /// - /// The actual command timeout value that gets set on internal commands. - /// - /// Precision is milliseconds - int InternalCommandTimeout - { - get - { - var internalTimeout = Settings.InternalCommandTimeout; - if (internalTimeout == -1) - return Math.Max(Settings.CommandTimeout, MinimumInternalCommandTimeout) * 1000; - - // Todo: Decide what we really want here - // This assertion can easily fail if InternalCommandTimeout is set to 1 or 2 in the connection string - // We probably don't want to allow these values but in that case a Debug.Assert is the wrong way to enforce it. 
- Debug.Assert(internalTimeout == 0 || internalTimeout >= MinimumInternalCommandTimeout); - return internalTimeout * 1000; - } - } #endregion Configuration settings @@ -420,6 +407,10 @@ internal ConnectorState State var newState = (int)value; if (newState == _state) return; + + if (newState is < 0 or > (int)ConnectorState.Replication) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Unknown state: " + value); + Interlocked.Exchange(ref _state, newState); } } @@ -427,20 +418,7 @@ internal ConnectorState State /// /// Returns whether the connector is open, regardless of any task it is currently performing /// - bool IsConnected - => State switch - { - ConnectorState.Ready => true, - ConnectorState.Executing => true, - ConnectorState.Fetching => true, - ConnectorState.Waiting => true, - ConnectorState.Copy => true, - ConnectorState.Replication => true, - ConnectorState.Closed => false, - ConnectorState.Connecting => false, - ConnectorState.Broken => false, - _ => throw new ArgumentOutOfRangeException("Unknown state: " + State) - }; + internal bool IsConnected => State is not (ConnectorState.Closed or ConnectorState.Connecting or ConnectorState.Broken); internal bool IsReady => State == ConnectorState.Ready; internal bool IsClosed => State == ConnectorState.Closed; @@ -461,20 +439,30 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca State = ConnectorState.Connecting; LogMessages.OpeningPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString); - var stopwatch = Stopwatch.StartNew(); + var startOpenTimestamp = Stopwatch.GetTimestamp(); + + Activity? 
activity = null; try { - await OpenCore(this, Settings.SslMode, timeout, async, cancellationToken); + var username = await GetUsernameAsync(async, cancellationToken).ConfigureAwait(false); - await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken); + activity = NpgsqlActivitySource.PhysicalConnectionOpen(this); - Debug.Assert(DataSource.TypeMapper is not null); - Debug.Assert(DataSource.DatabaseInfo is not null); - TypeMapper = DataSource.TypeMapper; - DatabaseInfo = DataSource.DatabaseInfo; + var gssEncMode = GetGssEncMode(Settings); - if (Settings.Pooling && !Settings.Multiplexing && !Settings.NoResetOnClose && DatabaseInfo.SupportsDiscard) + await OpenCore(this, username, Settings.SslMode, gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); + + if (activity is not null) + NpgsqlActivitySource.Enrich(activity, this); + + await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken).ConfigureAwait(false); + + // The connector directly references the current reloadable state reference, to protect it against changes by a concurrent + // ReloadTypes. We update them here before returning the connector from the pool. + ReloadableState = DataSource.CurrentReloadableState; + + if (Settings.Pooling && Settings is { NoResetOnClose: false } && DatabaseInfo.SupportsDiscard) { _sendResetOnClose = true; GenerateResetMessage(); @@ -482,26 +470,12 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca OpenTimestamp = DateTime.UtcNow; - if (Settings.Multiplexing) - { - // Start an infinite async loop, which processes incoming multiplexing traffic. - // It is intentionally not awaited and will run as long as the connector is alive. - // The CommandsInFlightWriter channel is completed in Cleanup, which should cause this task - // to complete. 
- _ = Task.Run(MultiplexingReadLoop, CancellationToken.None) - .ContinueWith(t => - { - // Note that we *must* observe the exception if the task is faulted. - ConnectionLogger.LogError(t.Exception!, "Exception bubbled out of multiplexing read loop", Id); - }, TaskContinuationOptions.OnlyOnFaulted); - } - if (_isKeepAliveEnabled) { // Start the keep alive mechanism to work by scheduling the timer. // Otherwise, it doesn't work for cases when no query executed during // the connection lifetime in case of a new connector. - lock (this) + lock (SyncObj) { var keepAlive = Settings.KeepAlive * 1000; _keepAliveTimer!.Change(keepAlive, keepAlive); @@ -517,8 +491,8 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca try { if (async) - await DataSource.ConnectionInitializerAsync(tempConnection); - else if (!async) + await DataSource.ConnectionInitializerAsync(tempConnection).ConfigureAwait(false); + else DataSource.ConnectionInitializer(tempConnection); } finally @@ -531,42 +505,67 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca } } + activity?.Dispose(); + LogMessages.OpenedPhysicalConnection( - ConnectionLogger, Host, Port, Database, UserFacingConnectionString, stopwatch.ElapsedMilliseconds, Id); + ConnectionLogger, Host, Port, Database, UserFacingConnectionString, + (long)Stopwatch.GetElapsedTime(startOpenTimestamp).TotalMilliseconds, Id); } catch (Exception e) { - Break(e); + if (activity is not null) + NpgsqlActivitySource.SetException(activity, e); + Break(e, markHostAsOfflineOnConnecting: true); + FullCleanup(); throw; } static async Task OpenCore( NpgsqlConnector conn, + string username, SslMode sslMode, + GssEncryptionMode gssEncMode, NpgsqlTimeout timeout, bool async, - CancellationToken cancellationToken, - bool isFirstAttempt = true) + CancellationToken cancellationToken) { - await conn.RawOpen(sslMode, timeout, async, cancellationToken, isFirstAttempt); + // If we fail to connect to the socket, 
there is no reason to retry even if SslMode/GssEncryption allows it + await conn.RawOpen(timeout, async, cancellationToken).ConfigureAwait(false); - var username = await conn.GetUsernameAsync(async, cancellationToken); - - timeout.CheckAndApply(conn); - conn.WriteStartupMessage(username); - await conn.Flush(async, cancellationToken); - - var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); try { - await conn.Authenticate(username, timeout, async, cancellationToken); + await conn.SetupEncryption(sslMode, gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); + timeout.CheckAndApply(conn); + conn.WriteStartupMessage(username); + await conn.Flush(async, cancellationToken).ConfigureAwait(false); + + using var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); + await conn.Authenticate(username, timeout, async, cancellationToken).ConfigureAwait(false); } - catch (PostgresException e) - when (e.SqlState == PostgresErrorCodes.InvalidAuthorizationSpecification && - (sslMode == SslMode.Prefer && conn.IsSecure || sslMode == SslMode.Allow && !conn.IsSecure)) + catch (OperationCanceledException) { - cancellationRegistration.Dispose(); - Debug.Assert(!conn.IsBroken); + throw; + } + // We handle any exception here because on Windows while receiving a response from Postgres + // We might hit connection reset, in which case the actual error will be lost + // And we only read some IO error + // In addition, this behavior mimics libpq, where it retries as long as GssEncryptionMode and SslMode allows it + catch (Exception e) when + // We might also get here OperationCancelledException/TimeoutException + // But it's fine to fall down and retry because we'll immediately exit with the exact same exception + // + // Any error after trying with GSS encryption + (gssEncMode == GssEncryptionMode.Prefer || + // Auth error with/without SSL + (sslMode == 
SslMode.Prefer && conn.IsSslEncrypted || sslMode == SslMode.Allow && !conn.IsSslEncrypted)) + { + if (gssEncMode == GssEncryptionMode.Prefer) + { + conn.ConnectionLogger.LogTrace(e, "Error while opening physical connection with GSS encryption, retrying without it"); + gssEncMode = GssEncryptionMode.Disable; + } + else + sslMode = sslMode == SslMode.Prefer ? SslMode.Disable : SslMode.Require; conn.Cleanup(); @@ -574,26 +573,25 @@ static async Task OpenCore( // If Allow was specified and we failed (without SSL), retry with SSL await OpenCore( conn, - sslMode == SslMode.Prefer ? SslMode.Disable : SslMode.Require, + username, + sslMode, + gssEncMode, timeout, async, - cancellationToken, - isFirstAttempt: false); + cancellationToken).ConfigureAwait(false); return; } - using var _ = cancellationRegistration; - // We treat BackendKeyData as optional because some PostgreSQL-like database // don't send it (CockroachDB, CrateDB) - var msg = await conn.ReadMessage(async); + var msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg.Code == BackendMessageCode.BackendKeyData) { var keyDataMsg = (BackendKeyDataMessage)msg; conn.BackendProcessId = keyDataMsg.BackendProcessId; conn._backendSecretKey = keyDataMsg.BackendSecretKey; - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); } if (msg.Code != BackendMessageCode.ReadyForQuery) @@ -603,21 +601,174 @@ await OpenCore( } } + internal async ValueTask GSSEncrypt(bool async, bool isRequired, CancellationToken cancellationToken) + { + ConnectionLogger.LogTrace("Negotiating GSS encryption"); + + var targetName = $"{KerberosServiceName}/{Host}"; + // See https://github.com/postgres/postgres/blob/a0dd0702e464f206b08c99a74cb58809c51aafa5/src/interfaces/libpq/fe-secure-gssapi.c#L651-L658 + // We do not support delegation (TokenImpersonationLevel.Delegation) for now (#6540) + var clientOptions = new NegotiateAuthenticationClientOptions + { + TargetName = targetName, + 
RequireMutualAuthentication = true, + RequiredProtectionLevel = ProtectionLevel.EncryptAndSign, + // GSS encryption only works with kerberos + Package = "Kerberos" + }; + + NegotiateOptionsCallback?.Invoke(clientOptions); + + var authentication = new NegotiateAuthentication(clientOptions); + + try + { + byte[]? data; + NegotiateAuthenticationStatusCode statusCode; + + try + { + data = authentication.GetOutgoingBlob(ReadOnlySpan.Empty, out statusCode)!; + } + catch (TypeInitializationException) + { + // On UNIX .NET throws TypeInitializationException if it's unable to load the native library + if (isRequired) + throw new NpgsqlException("Unable to load native library to negotiate GSS encryption"); + + return GssEncryptionResult.GetCredentialFailure; + } + + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) + { + // Unable to retrieve credentials + // If it's required, throw an appropriate exception + if (isRequired) + throw new NpgsqlException($"Unable to negotiate GSS encryption: {statusCode}"); + + return GssEncryptionResult.GetCredentialFailure; + } + + WriteGSSEncryptRequest(); + await Flush(async, cancellationToken).ConfigureAwait(false); + + await ReadBuffer.Ensure(1, async).ConfigureAwait(false); + var response = (char)ReadBuffer.ReadByte(); + + // TODO: Server can respond with an error here + // but according to documentation we shouldn't display this error to the user/application + // since the server has not been authenticated (CVE-2024-10977) + // See https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-GSSAPI + switch (response) + { + default: + throw new NpgsqlException($"Received unknown response {response} for GSSEncRequest (expecting G or N)"); + case 'N': + if (isRequired) + throw new NpgsqlException("GSS encryption requested. 
No GSS encryption enabled connection from this host is configured."); + return GssEncryptionResult.NegotiateFailure; + case 'G': + break; + } + + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException( + "Additional unencrypted data received after GSS encryption negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + + var lengthBuffer = new byte[4]; + + await WriteGssEncryptMessage(async, data, lengthBuffer, cancellationToken).ConfigureAwait(false); + + while (true) + { + if (async) + await _stream.ReadExactlyAsync(lengthBuffer, cancellationToken).ConfigureAwait(false); + else + _stream.ReadExactly(lengthBuffer); + + var messageLength = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref lengthBuffer[0])) + : Unsafe.ReadUnaligned(ref lengthBuffer[0]); + + var buffer = ArrayPool.Shared.Rent(messageLength); + if (async) + await _stream.ReadExactlyAsync(buffer.AsMemory(0, messageLength), cancellationToken).ConfigureAwait(false); + else + _stream.ReadExactly(buffer.AsSpan(0, messageLength)); + + data = authentication.GetOutgoingBlob(buffer.AsSpan(0, messageLength), out statusCode); + ArrayPool.Shared.Return(buffer, clearArray: true); + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) + throw new NpgsqlException($"Error while negotiating GSS encryption: {statusCode}"); + + // TODO: the code below is the copy from GSS/SSPI auth + // It's unknown whether it holds true here or not + + // We might get NegotiateAuthenticationStatusCode.Completed but the data will not be null + // This can happen if it's the first cycle, in which case we have to send that data to complete handshake (#4888) + if (data is null) + { + Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.Completed); + break; + } + + await WriteGssEncryptMessage(async, data, lengthBuffer, cancellationToken).ConfigureAwait(false); + } + + 
_stream = new GSSStream(_stream, authentication); + ReadBuffer.Underlying = _stream; + WriteBuffer.Underlying = _stream; + IsGssEncrypted = true; + authentication = null; + + ConnectionLogger.LogTrace("GSS encryption successful"); + return GssEncryptionResult.Success; + + async ValueTask WriteGssEncryptMessage(bool async, byte[] data, byte[] lengthBuffer, CancellationToken cancellationToken) + { + BinaryPrimitives.WriteInt32BigEndian(lengthBuffer, data.Length); + + if (async) + { + await _stream.WriteAsync(lengthBuffer, cancellationToken).ConfigureAwait(false); + await _stream.WriteAsync(data, cancellationToken).ConfigureAwait(false); + await _stream.FlushAsync(cancellationToken).ConfigureAwait(false); + } + else + { + _stream.Write(lengthBuffer); + _stream.Write(data); + _stream.Flush(); + } + } + } + catch (Exception e) when (e is not OperationCanceledException) + { + throw new NpgsqlException("Exception while performing GSS encryption", e); + } + finally + { + authentication?.Dispose(); + } + } + internal async ValueTask QueryDatabaseState( NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken = default) { - using var cmd = CreateCommand("select pg_is_in_recovery(); SHOW default_transaction_read_only"); - cmd.CommandTimeout = (int)timeout.CheckAndGetTimeLeft().TotalSeconds; + using var batch = CreateBatch(); + batch.BatchCommands.Add(new NpgsqlBatchCommand("select pg_is_in_recovery()")); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SHOW default_transaction_read_only")); + batch.Timeout = (int)timeout.CheckAndGetTimeLeft().TotalSeconds; - var reader = async ? await cmd.ExecuteReaderAsync(cancellationToken) : cmd.ExecuteReader(); + var reader = async ? 
await batch.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false) : batch.ExecuteReader(); try { if (async) { - await reader.ReadAsync(cancellationToken); + await reader.ReadAsync(cancellationToken).ConfigureAwait(false); _isHotStandBy = reader.GetBoolean(0); - await reader.NextResultAsync(cancellationToken); - await reader.ReadAsync(cancellationToken); + await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); + await reader.ReadAsync(cancellationToken).ConfigureAwait(false); } else { @@ -626,7 +777,7 @@ internal async ValueTask QueryDatabaseState( reader.NextResult(); reader.Read(); } - + _isTransactionReadOnly = reader.GetString(0) != "off"; var databaseState = UpdateDatabaseState(); @@ -636,7 +787,7 @@ internal async ValueTask QueryDatabaseState( finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -655,8 +806,9 @@ void WriteStartupMessage(string username) if (Settings.Database is not null) startupParams["database"] = Settings.Database; - if (Settings.ApplicationName?.Length > 0) - startupParams["application_name"] = Settings.ApplicationName; + var applicationName = Settings.ApplicationName ?? 
PostgresEnvironment.AppName; + if (applicationName?.Length > 0) + startupParams["application_name"] = applicationName; if (Settings.SearchPath?.Length > 0) startupParams["search_path"] = Settings.SearchPath; @@ -682,48 +834,67 @@ void WriteStartupMessage(string username) WriteStartup(startupParams); } - async ValueTask GetUsernameAsync(bool async, CancellationToken cancellationToken) + ValueTask GetUsernameAsync(bool async, CancellationToken cancellationToken) { var username = Settings.Username; if (username?.Length > 0) - return username; + { + InferredUserName = username; + return new(username); + } username = PostgresEnvironment.User; if (username?.Length > 0) - return username; + { + InferredUserName = username; + return new(username); + } + + return GetUsernameAsyncInternal(); - if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + async ValueTask GetUsernameAsyncInternal() { - username = await KerberosUsernameProvider.GetUsernameAsync(Settings.IncludeRealm, ConnectionLogger, async, cancellationToken); + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + username = await DataSource.IntegratedSecurityHandler.GetUsername(async, Settings.IncludeRealm, ConnectionLogger, + cancellationToken).ConfigureAwait(false); + if (username?.Length > 0) + { + InferredUserName = username; + return username; + } + } + + username = Environment.UserName; if (username?.Length > 0) + { + InferredUserName = username; return username; - } - - username = Environment.UserName; - if (username?.Length > 0) - return username; + } - throw new NpgsqlException("No username could be found, please specify one explicitly"); + throw new NpgsqlException("No username could be found, please specify one explicitly"); + } } - async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken, bool isFirstAttempt = true) + async Task RawOpen(NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { - var cert = 
default(X509Certificate2?); try { if (async) - await ConnectAsync(timeout, cancellationToken); + await ConnectAsync(timeout, cancellationToken).ConfigureAwait(false); else Connect(timeout); + ConnectionLogger.LogTrace("Socket connected to {Host}:{Port}", Host, Port); + _baseStream = new NetworkStream(_socket, true); _stream = _baseStream; if (Settings.Encoding == "UTF8") { - TextEncoding = PGUtil.UTF8Encoding; - RelaxedTextEncoding = PGUtil.RelaxedUTF8Encoding; + TextEncoding = NpgsqlWriteBuffer.UTF8Encoding; + RelaxedTextEncoding = NpgsqlWriteBuffer.RelaxedUTF8Encoding; } else { @@ -736,145 +907,308 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat timeout.CheckAndApply(this); - IsSecure = false; + IsSslEncrypted = false; + IsGssEncrypted = false; + } + catch + { + _stream?.Dispose(); + _stream = null!; + + _baseStream?.Dispose(); + _baseStream = null!; + + _socket?.Dispose(); + _socket = null!; + + throw; + } + } + + async Task SetupEncryption(SslMode sslMode, GssEncryptionMode gssEncryptionMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) + { + var gssEncryptResult = await TryNegotiateGssEncryption(gssEncryptionMode, async, cancellationToken).ConfigureAwait(false); + if (gssEncryptResult == GssEncryptionResult.Success) + return; - if (sslMode is SslMode.Prefer or SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) + // TryNegotiateGssEncryption should already throw a much more meaningful exception + // if GSS encryption is required but for some reason we can't negotiate it. 
+ // But since we have to return a specific result instead of generic true/false + // To make absolutely sure we didn't miss anything, recheck again + if (gssEncryptionMode == GssEncryptionMode.Require) + throw new NpgsqlException($"Unable to negotiate GSS encryption: {gssEncryptResult}"); + + timeout.CheckAndApply(this); + + if (GetSslNegotiation(Settings) == SslNegotiation.Direct) + { + // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but since we also allow environment variables... + if (Settings.SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) + throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); + if (gssEncryptResult == GssEncryptionResult.NegotiateFailure) { - WriteSslRequest(); - await Flush(async, cancellationToken); + // We can be here only if it's fallback from preferred (but failed) gss encryption + // In this case, direct encryption isn't going to work anymore, so we throw a bogus exception to retry again without gss + // Alternatively, we can instead just go with the usual route of writing SslRequest, ignoring direct ssl + // But this is how libpq works + Debug.Assert(gssEncryptionMode == GssEncryptionMode.Prefer); + // The exception message doesn't matter since we're going to retry again + throw new NpgsqlException(); + } - await ReadBuffer.Ensure(1, async); - var response = (char)ReadBuffer.ReadByte(); - timeout.CheckAndApply(this); + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + } + else if ((sslMode is SslMode.Prefer && DataSource.TransportSecurityHandler.SupportEncryption) || + sslMode is SslMode.Require or SslMode.VerifyCA or 
SslMode.VerifyFull) + { + WriteSslRequest(); + await Flush(async, cancellationToken).ConfigureAwait(false); - switch (response) - { - default: - throw new NpgsqlException($"Received unknown response {response} for SSLRequest (expecting S or N)"); - case 'N': - if (sslMode != SslMode.Prefer) - throw new NpgsqlException("SSL connection requested. No SSL enabled connection from this host is configured."); - break; - case 'S': - var clientCertificates = new X509Certificate2Collection(); - var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? PostgresEnvironment.SslCertDefault; + await ReadBuffer.Ensure(1, async).ConfigureAwait(false); + var response = (char)ReadBuffer.ReadByte(); + timeout.CheckAndApply(this); - if (certPath != null) - { - var password = Settings.SslPassword; - - if (Path.GetExtension(certPath).ToUpperInvariant() != ".PFX") - { -#if NET5_0_OR_GREATER - // It's PEM time - var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? PostgresEnvironment.SslKeyDefault; - cert = string.IsNullOrEmpty(password) - ? X509Certificate2.CreateFromPemFile(certPath, keyPath) - : X509Certificate2.CreateFromEncryptedPemFile(certPath, password, keyPath); - if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) - { - // Windows crypto API has a bug with pem certs - // See #3650 - using var previousCert = cert; - cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); - } -#else - throw new NotSupportedException("PEM certificates are only supported with .NET 5 and higher"); -#endif - } - - cert ??= new X509Certificate2(certPath, password); - clientCertificates.Add(cert); - } + switch (response) + { + default: + throw new NpgsqlException($"Received unknown response {response} for SSLRequest (expecting S or N)"); + case 'N': + if (sslMode != SslMode.Prefer) + throw new NpgsqlException("SSL connection requested. 
No SSL enabled connection from this host is configured."); + break; + case 'S': + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); + break; + } - ClientCertificatesCallback?.Invoke(clientCertificates); + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + } + } - var checkCertificateRevocation = Settings.CheckCertificateRevocation; + async ValueTask TryNegotiateGssEncryption(GssEncryptionMode gssEncryptionMode, bool async, CancellationToken cancellationToken) + { + // GetCredentialFailure is essentially a nop (since we didn't send anything over the wire) + // So we can proceed further as if gss encryption wasn't even attempted + if (gssEncryptionMode == GssEncryptionMode.Disable) return GssEncryptionResult.GetCredentialFailure; - RemoteCertificateValidationCallback? 
certificateValidationCallback; + // Same thing as above, though in this case user doesn't require GSS encryption but didn't enable encryption + // Most of the time they're using the default value, in which case also exit without throwing an error + if (gssEncryptionMode == GssEncryptionMode.Prefer && !DataSource.TransportSecurityHandler.SupportEncryption) + return GssEncryptionResult.GetCredentialFailure; - if (UserCertificateValidationCallback is not null) - { - if (sslMode is SslMode.VerifyCA or SslMode.VerifyFull) - throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode)); + if (ConnectedEndPoint!.AddressFamily == AddressFamily.Unix) + { + if (gssEncryptionMode == GssEncryptionMode.Prefer) + return GssEncryptionResult.GetCredentialFailure; - if (Settings.RootCertificate is not null) - throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback)); + Debug.Assert(gssEncryptionMode == GssEncryptionMode.Require); + throw new NpgsqlException("GSS encryption isn't supported over unix socket"); + } - certificateValidationCallback = UserCertificateValidationCallback; - } - else if (sslMode is SslMode.Prefer or SslMode.Require) - { - if (isFirstAttempt && sslMode is SslMode.Require && !Settings.TrustServerCertificate) - throw new ArgumentException(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate); + return await DataSource.IntegratedSecurityHandler.GSSEncrypt(async, gssEncryptionMode == GssEncryptionMode.Require, this, cancellationToken) + .ConfigureAwait(false); + } - certificateValidationCallback = SslTrustServerValidation; - checkCertificateRevocation = false; - } - else if ((Settings.RootCertificate ?? PostgresEnvironment.SslCertRoot ?? 
PostgresEnvironment.SslCertRootDefault) is - { } certRootPath) - { - certificateValidationCallback = SslRootValidation(certRootPath, sslMode == SslMode.VerifyFull); - } - else if (sslMode == SslMode.VerifyCA) - { - certificateValidationCallback = SslVerifyCAValidation; - } - else - { - Debug.Assert(sslMode == SslMode.VerifyFull); - certificateValidationCallback = SslVerifyFullValidation; - } + static SslNegotiation GetSslNegotiation(NpgsqlConnectionStringBuilder settings) + { + if (settings.UserProvidedSslNegotiation is { } userProvidedSslNegotiation) + return userProvidedSslNegotiation; - timeout.CheckAndApply(this); + if (PostgresEnvironment.SslNegotiation is { } sslNegotiationEnv) + { + if (Enum.TryParse(sslNegotiationEnv, ignoreCase: true, out var sslNegotiation)) + return sslNegotiation; + } - try - { - var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); + // If user hasn't provided the value via connection string or environment variable + // Retrieve the default value from property + return settings.SslNegotiation; + } - var sslProtocols = SslProtocols.None; - // On .NET Framework SslProtocols.None can be disabled, see #3718 -#if NETSTANDARD2_0 - sslProtocols = SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12; -#endif + static GssEncryptionMode GetGssEncMode(NpgsqlConnectionStringBuilder settings) + { + if (settings.UserProvidedGssEncMode is { } userProvidedGssEncMode) + return userProvidedGssEncMode; - if (async) - await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, sslProtocols, checkCertificateRevocation); - else - sslStream.AuthenticateAsClient(Host, clientCertificates, sslProtocols, checkCertificateRevocation); + if (PostgresEnvironment.GssEncryptionMode is { } gssEncModeEnv) + { + if (Enum.TryParse(gssEncModeEnv, ignoreCase: true, out var gssEncMode)) + return gssEncMode; + } - _stream = sslStream; - } - catch (Exception e) + // If user hasn't provided the value via connection 
string or environment variable + // Retrieve the default value from property + return settings.GssEncryptionMode; + } + + internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) + { + ConnectionLogger.LogTrace("Negotiating SSL encryption"); + + var clientCertificates = new X509Certificate2Collection(); + var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? PostgresEnvironment.SslCertDefault; + + if (certPath != null) + { + var password = Settings.SslPassword; + + if (!string.Equals(Path.GetExtension(certPath), ".pfx", StringComparison.OrdinalIgnoreCase)) + { + // It's PEM time + var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? PostgresEnvironment.SslKeyDefault; + + // With PEM certificates we might have multiple certificates in a single file + // Where the first one is a leaf (and it has to have a private key) + // And others are intermediate between it and CA cert + // To support this, we first load the leaf certificate with private key + // And then we load everything else including the leaf, but without private key + // And afterwards we just get rid of the duplicate + var firstClientCert = string.IsNullOrEmpty(password) + ? 
X509Certificate2.CreateFromPemFile(certPath, keyPath) + : X509Certificate2.CreateFromEncryptedPemFile(certPath, password, keyPath); + clientCertificates.Add(firstClientCert); + + clientCertificates.ImportFromPemFile(certPath); + clientCertificates[1].Dispose(); + clientCertificates.RemoveAt(1); + + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + for (var i = 0; i < clientCertificates.Count; i++) { - throw new NpgsqlException("Exception while performing SSL handshake", e); - } + var cert = clientCertificates[i]; - ReadBuffer.Underlying = _stream; - WriteBuffer.Underlying = _stream; - IsSecure = true; - ConnectionLogger.LogTrace("SSL negotiation successful"); - break; + // Windows crypto API has a bug with pem certs + // See #3650 + using var previousCert = cert; + cert = X509CertificateLoader.LoadPkcs12(cert.Export(X509ContentType.Pkcs12), null); + clientCertificates[i] = cert; + } } + } - if (ReadBuffer.ReadBytesLeft > 0) - throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + // If it's empty, it's probably PFX + if (clientCertificates.Count == 0) + { + var certs = X509CertificateLoader.LoadPkcs12CollectionFromFile(certPath, password); + clientCertificates.AddRange(certs); } - ConnectionLogger.LogTrace("Socket connected to {Host}:{Port}", Host, Port); + var certificates = new List(); + foreach (var certificate in clientCertificates) + certificates.Add(certificate); + _certificates = certificates; } - catch + + try { - cert?.Dispose(); + var checkCertificateRevocation = Settings.CheckCertificateRevocation; - _stream?.Dispose(); - _stream = null!; + RemoteCertificateValidationCallback? certificateValidationCallback; + X509Certificate2Collection? caCerts; + string? 
certRootPath = null; - _baseStream?.Dispose(); - _baseStream = null!; + if (sslMode is SslMode.Prefer or SslMode.Require) + { + certificateValidationCallback = SslTrustServerValidation; + checkCertificateRevocation = false; + } + else if (((caCerts = DataSource.TransportSecurityHandler.RootCertificatesCallback?.Invoke()) is not null && caCerts.Count > 0) || + (certRootPath = Settings.RootCertificate ?? + PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is not null) + { + certificateValidationCallback = SslRootValidation(sslMode == SslMode.VerifyFull, certRootPath, caCerts); + } + else if (sslMode == SslMode.VerifyCA) + { + certificateValidationCallback = SslVerifyCAValidation; + } + else + { + Debug.Assert(sslMode == SslMode.VerifyFull); + certificateValidationCallback = SslVerifyFullValidation; + } - _socket?.Dispose(); - _socket = null!; + SslStreamCertificateContext? clientCertificateContext = null; + if (clientCertificates.Count > 0) + { + // SslClientAuthenticationOptions.ClientCertificates only sends trusted certificates or if they have private key + // Which makes us unable to send intermediate certificates + // Work around this by specifying the first certificate as target + // And others as additional + // See https://github.com/dotnet/runtime/issues/26323 + var clientCertificate = clientCertificates[0]; + clientCertificates.RemoveAt(0); + + clientCertificateContext = SslStreamCertificateContext.Create(clientCertificate, clientCertificates); + } + + var host = Host; + + timeout.CheckAndApply(this); + + var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false); + + var sslStreamOptions = new SslClientAuthenticationOptions + { + TargetHost = host, + ClientCertificateContext = clientCertificateContext, + EnabledSslProtocols = SslProtocols.None, + CertificateRevocationCheckMode = checkCertificateRevocation ? 
X509RevocationMode.Online : X509RevocationMode.NoCheck, + RemoteCertificateValidationCallback = certificateValidationCallback, + ApplicationProtocols = [_alpnProtocol] + }; + + if (SslClientAuthenticationOptionsCallback is not null) + { + SslClientAuthenticationOptionsCallback.Invoke(sslStreamOptions); + + // User changed remote certificate validation callback + // Check whether the change doesn't lead to unexpected behavior + if (sslStreamOptions.RemoteCertificateValidationCallback != certificateValidationCallback) + { + if (sslMode is SslMode.VerifyCA or SslMode.VerifyFull) + throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithCustomValidationCallback, sslMode)); + + if (Settings.RootCertificate is not null) + throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithCustomValidationCallback); + + if (DataSource.TransportSecurityHandler.RootCertificatesCallback is not null) + throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithCustomValidationCallback); + } + } + + try + { + if (async) + await sslStream.AuthenticateAsClientAsync(sslStreamOptions, cancellationToken).ConfigureAwait(false); + else + sslStream.AuthenticateAsClient(sslStreamOptions); + + _stream = sslStream; + sslStream = null; + } + catch (Exception e) when (e is not OperationCanceledException) + { + throw new NpgsqlException("Exception while performing SSL handshake", e); + } + finally + { + sslStream?.Dispose(); + } + + ReadBuffer.Underlying = _stream; + WriteBuffer.Underlying = _stream; + IsSslEncrypted = true; + ConnectionLogger.LogTrace("SSL negotiation successful"); + } + catch + { + _certificates?.ForEach(x => x.Dispose()); + _certificates = null; throw; } @@ -882,11 +1216,23 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat void Connect(NpgsqlTimeout timeout) { - // Note that there aren't any timeout-able or cancellable DNS methods - var endpoints = 
NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) - ? new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : Dns.GetHostAddresses(Host).Select(a => new IPEndPoint(a, Port)).ToArray(); - timeout.Check(); + EndPoint[]? endpoints; + if (NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath)) + { + endpoints = [new UnixDomainSocketEndPoint(socketPath!)]; + } + else + { + // Note that there aren't any timeout-able or cancellable DNS methods + try + { + endpoints = IPAddressesToEndpoints(Dns.GetHostAddresses(Host), Port); + } + catch (SocketException ex) + { + throw new NpgsqlException(ex.Message, ex); + } + } // Give each endpoint an equal share of the remaining time var perEndpointTimeout = -1; // Default to infinity @@ -909,6 +1255,9 @@ void Connect(NpgsqlTimeout timeout) try { + // Some options are not applied after the socket is open, see #6013 + SetSocketOptions(socket); + try { socket.Connect(endpoint); @@ -924,10 +1273,9 @@ void Connect(NpgsqlTimeout timeout) var errorCode = (int) socket.GetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Error)!; if (errorCode != 0) throw new SocketException(errorCode); - if (!write.Any()) + if (write.Count is 0) throw new TimeoutException("Timeout during connection attempt"); socket.Blocking = true; - SetSocketOptions(socket); _socket = socket; ConnectedEndPoint = endpoint; return; @@ -950,32 +1298,45 @@ void Connect(NpgsqlTimeout timeout) async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationToken) { - Task GetHostAddressesAsync(CancellationToken ct) => -#if NET6_0_OR_GREATER - Dns.GetHostAddressesAsync(Host, ct); -#else - Dns.GetHostAddressesAsync(Host); -#endif - - // Whether the framework and/or the OS platform support Dns.GetHostAddressesAsync cancellation API or they do not, - // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. 
It stops waiting - // and raises the exception, while the actual task may be left running. - var endpoints = NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) - ? new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : (await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken)) - .Select(a => new IPEndPoint(a, Port)).ToArray(); - - // Give each IP an equal share of the remaining time - var perIpTimespan = default(TimeSpan); - var perIpTimeout = timeout; - if (timeout.IsSet) + EndPoint[] endpoints; + if (NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath)) { - perIpTimespan = new TimeSpan(timeout.CheckAndGetTimeLeft().Ticks / endpoints.Length); - perIpTimeout = new NpgsqlTimeout(perIpTimespan); + endpoints = [new UnixDomainSocketEndPoint(socketPath)]; } + else + { + IPAddress[] ipAddresses = []; + using var combinedCts = timeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; + combinedCts?.CancelAfter(timeout.CheckAndGetTimeLeft()); + var combinedToken = combinedCts?.Token ?? cancellationToken; + try + { + ipAddresses = await Dns.GetHostAddressesAsync(Host, combinedToken).ConfigureAwait(false); + } + catch (OperationCanceledException) + { + cancellationToken.ThrowIfCancellationRequested(); + Debug.Assert(timeout.HasExpired); + ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); + } + catch (SocketException ex) + { + throw new NpgsqlException(ex.Message, ex); + } + + endpoints = IPAddressesToEndpoints(ipAddresses, Port); + } + + // Give each endpoint an equal share of the remaining time + var perEndpointTimeout = default(TimeSpan); + if (timeout.IsSet) + perEndpointTimeout = timeout.CheckAndGetTimeLeft() / endpoints.Length; for (var i = 0; i < endpoints.Length; i++) { + var endpointTimeout = timeout.IsSet ? 
new NpgsqlTimeout(perEndpointTimeout) : timeout; + Debug.Assert(timeout.IsSet == endpointTimeout.IsSet); + var endpoint = endpoints[i]; ConnectionLogger.LogTrace("Attempting to connect to {Endpoint}", endpoint); var protocolType = @@ -986,8 +1347,14 @@ Task GetHostAddressesAsync(CancellationToken ct) => var socket = new Socket(endpoint.AddressFamily, SocketType.Stream, protocolType); try { - await OpenSocketConnectionAsync(socket, endpoint, perIpTimeout, cancellationToken); + // Some options are not applied after the socket is open, see #6013 SetSocketOptions(socket); + + using var combinedCts = endpointTimeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; + combinedCts?.CancelAfter(endpointTimeout.CheckAndGetTimeLeft()); + var combinedToken = combinedCts?.Token ?? cancellationToken; + await socket.ConnectAsync(endpoint, combinedToken).ConfigureAwait(false); + _socket = socket; ConnectedEndPoint = endpoint; return; @@ -1007,6 +1374,8 @@ Task GetHostAddressesAsync(CancellationToken ct) => if (e is OperationCanceledException) e = new TimeoutException("Timeout during connection attempt"); + else if (e is NpgsqlException) + e = e.InnerException!; // We throw NpgsqlException for timeouts, wrapping TimeoutException ConnectionLogger.LogTrace(e, "Failed to connect to {Endpoint}", endpoint); @@ -1014,20 +1383,14 @@ Task GetHostAddressesAsync(CancellationToken ct) => throw new NpgsqlException($"Failed to connect to {endpoint}", e); } } + } - static Task OpenSocketConnectionAsync(Socket socket, EndPoint endpoint, NpgsqlTimeout perIpTimeout, CancellationToken cancellationToken) - { - // Whether the framework and/or the OS platform support Socket.ConnectAsync cancellation API or they do not, - // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting - // and raises the exception, while the actual task may be left running. 
- Task ConnectAsync(CancellationToken ct) => -#if NET5_0_OR_GREATER - socket.ConnectAsync(endpoint, ct).AsTask(); -#else - socket.ConnectAsync(endpoint); -#endif - return TaskTimeoutAndCancellation.ExecuteAsync(ConnectAsync, perIpTimeout, cancellationToken); - } + EndPoint[] IPAddressesToEndpoints(IPAddress[] ipAddresses, int port) + { + var result = new EndPoint[ipAddresses.Length]; + for (var i = 0; i < ipAddresses.Length; i++) + result[i] = new IPEndPoint(ipAddresses[i], port); + return result; } void SetSocketOptions(Socket socket) @@ -1041,7 +1404,7 @@ void SetSocketOptions(Socket socket) if (Settings.TcpKeepAlive) socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); - if (Settings.TcpKeepAliveInterval > 0 && Settings.TcpKeepAliveTime == 0) + if (Settings is { TcpKeepAliveInterval: > 0, TcpKeepAliveTime: 0 }) throw new ArgumentException("If TcpKeepAliveInterval is defined, TcpKeepAliveTime must be defined as well"); if (Settings.TcpKeepAliveTime > 0) { @@ -1050,144 +1413,14 @@ void SetSocketOptions(Socket socket) ? Settings.TcpKeepAliveInterval : Settings.TcpKeepAliveTime; -#if NETSTANDARD2_0 || NETSTANDARD2_1 - var timeMilliseconds = timeSeconds * 1000; - var intervalMilliseconds = intervalSeconds * 1000; - - // For the following see https://msdn.microsoft.com/en-us/library/dd877220.aspx - var uintSize = Marshal.SizeOf(typeof(uint)); - var inOptionValues = new byte[uintSize * 3]; - BitConverter.GetBytes((uint)1).CopyTo(inOptionValues, 0); - BitConverter.GetBytes((uint)timeMilliseconds).CopyTo(inOptionValues, uintSize); - BitConverter.GetBytes((uint)intervalMilliseconds).CopyTo(inOptionValues, uintSize * 2); - var result = 0; - try - { - result = socket.IOControl(IOControlCode.KeepAliveValues, inOptionValues, null); - } - catch (PlatformNotSupportedException) - { - throw new PlatformNotSupportedException("Setting TCP Keepalive Time and TCP Keepalive Interval is supported only on Windows, Mono and .NET Core 3.1+. 
" + - "TCP keepalives can still be used on other systems but are enabled via the TcpKeepAlive option or configured globally for the machine, see the relevant docs."); - } - - if (result != 0) - throw new NpgsqlException($"Got non-zero value when trying to set TCP keepalive: {result}"); -#else socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.TcpKeepAliveTime, timeSeconds); socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.TcpKeepAliveInterval, intervalSeconds); -#endif } } #endregion - #region I/O - - readonly ChannelReader? CommandsInFlightReader; - internal readonly ChannelWriter? CommandsInFlightWriter; - - internal volatile int CommandsInFlightCount; - - internal ManualResetValueTaskSource ReaderCompleted { get; } = - new() { RunContinuationsAsynchronously = true }; - - async Task MultiplexingReadLoop() - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(CommandsInFlightReader != null); - - NpgsqlCommand? command = null; - var commandsRead = 0; - - try - { - while (await CommandsInFlightReader.WaitToReadAsync()) - { - commandsRead = 0; - Debug.Assert(!InTransaction); - - while (CommandsInFlightReader.TryRead(out command)) - { - commandsRead++; - - await ReadBuffer.Ensure(5, true); - - // We have a resultset for the command - hand back control to the command (which will - // return it to the user) - command.TraceReceivedFirstResponse(); - ReaderCompleted.Reset(); - command.ExecutionCompletion.SetResult(this); - - // Now wait until that command's reader is disposed. Note that RunContinuationsAsynchronously is - // true, so that the user code calling NpgsqlDataReader.Dispose will not continue executing - // synchronously here. The prevents issues if the code after the next command's execution - // completion blocks. 
- await new ValueTask(ReaderCompleted, ReaderCompleted.Version); - Debug.Assert(!InTransaction); - } - - // Atomically update the commands in-flight counter, and check if it reached 0. If so, the - // connector is idle and can be returned. - // Note that this is racing with over-capacity writing, which can select any connector at any - // time (see MultiplexingWriteLoop), and we must make absolutely sure that if a connector is - // returned to the pool, it is *never* written to unless properly dequeued from the Idle channel. - if (Interlocked.Add(ref CommandsInFlightCount, -commandsRead) == 0) - { - // There's a race condition where the continuation of an asynchronous multiplexing write may not - // have executed yet, and the flush may still be in progress. We know all I/O has already - // been sent - because the reader has already consumed the entire resultset. So we wait until - // the connector's write lock has been released (long waiting will never occur here). - SpinWait.SpinUntil(() => MultiplexAsyncWritingLock == 0 || IsBroken); - - ResetReadBuffer(); - DataSource.Return(this); - } - } - - ConnectionLogger.LogTrace("Exiting multiplexing read loop", Id); - } - catch (Exception e) - { - Debug.Assert(IsBroken); - - // Decrement the commands already dequeued from the in-flight counter - Interlocked.Add(ref CommandsInFlightCount, -commandsRead); - - // When a connector is broken, the causing exception is stored on it. We fail commands with - // that exception - rather than the one thrown here - since the break may have happened during - // writing, and we want to bubble that one up. - - // Drain any pending in-flight commands and fail them. Note that some have only been written - // to the buffer, and not sent to the server. 
- command?.ExecutionCompletion.SetException(_breakReason!); - try - { - while (true) - { - var pendingCommand = await CommandsInFlightReader.ReadAsync(); - - // TODO: the exception we have here is sometimes just the result of the write loop breaking - // the connector, so it doesn't represent the actual root cause. - pendingCommand.ExecutionCompletion.SetException(_breakReason!); - } - } - catch (ChannelClosedException) - { - // All good, drained to the channel and failed all commands - } - - // "Return" the connector to the pool to for cleanup (e.g. update total connector count) - DataSource.Return(this); - - ConnectionLogger.LogError(e, "Exception in multiplexing read loop", Id); - } - - Debug.Assert(CommandsInFlightCount == 0); - } - - #endregion #region Frontend message processing @@ -1206,23 +1439,18 @@ internal void PrependInternalMessage(byte[] rawMessage, int responseMessageCount #region Backend message processing - internal ValueTask ReadMessage(bool async, DataRowLoadingMode dataRowLoadingMode = DataRowLoadingMode.NonSequential) - => ReadMessage(async, dataRowLoadingMode, readingNotifications: false)!; - internal ValueTask ReadMessageWithNotifications(bool async) - => ReadMessage(async, DataRowLoadingMode.NonSequential, readingNotifications: true); + => ReadMessageLong(async, DataRowLoadingMode.NonSequential, readingNotifications: true); - ValueTask ReadMessage( + internal ValueTask ReadMessage( bool async, - DataRowLoadingMode dataRowLoadingMode, - bool readingNotifications) + DataRowLoadingMode dataRowLoadingMode = DataRowLoadingMode.NonSequential) { if (PendingPrependedResponses > 0 || - dataRowLoadingMode != DataRowLoadingMode.NonSequential || - readingNotifications || + dataRowLoadingMode == DataRowLoadingMode.Skip || ReadBuffer.ReadBytesLeft < 5) { - return ReadMessageLong(this, async, dataRowLoadingMode, readingNotifications); + return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; } var messageCode = 
(BackendMessageCode)ReadBuffer.ReadByte(); @@ -1233,172 +1461,190 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d case BackendMessageCode.ParameterStatus: case BackendMessageCode.ErrorResponse: ReadBuffer.ReadPosition--; - return ReadMessageLong(this, async, dataRowLoadingMode, readingNotifications: false); - case BackendMessageCode.ReadyForQuery: - break; + return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; } - PGUtil.ValidateBackendMessageCode(messageCode); + ValidateBackendMessageCode(messageCode); var len = ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself if (len > ReadBuffer.ReadBytesLeft) { ReadBuffer.ReadPosition -= 5; - return ReadMessageLong(this, async, dataRowLoadingMode, readingNotifications: false); + return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; } - return new ValueTask(ParseServerMessage(ReadBuffer, messageCode, len, false)); + return new ValueTask(ParseServerMessage(ReadBuffer, messageCode, len, false))!; + } - static async ValueTask ReadMessageLong( - NpgsqlConnector connector, - bool async, - DataRowLoadingMode dataRowLoadingMode, - bool readingNotifications, - bool isReadingPrependedMessage = false) + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask ReadMessageLong( + bool async, + DataRowLoadingMode dataRowLoadingMode, + bool readingNotifications, + bool isReadingPrependedMessage = false) + { + // First read the responses of any prepended messages. + if (PendingPrependedResponses > 0 && !isReadingPrependedMessage) { - // First read the responses of any prepended messages. 
- if (connector.PendingPrependedResponses > 0 && !isReadingPrependedMessage) + try { - try - { - // TODO: There could be room for optimization here, rather than the async call(s) - connector.ReadBuffer.Timeout = TimeSpan.FromMilliseconds(connector.InternalCommandTimeout); - for (; connector.PendingPrependedResponses > 0; connector.PendingPrependedResponses--) - await ReadMessageLong(connector, async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true); - } - catch (PostgresException e) - { - throw connector.Break(e); - } + // TODO: There could be room for optimization here, rather than the async call(s) + for (; PendingPrependedResponses > 0; PendingPrependedResponses--) + await ReadMessageLong(async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true).ConfigureAwait(false); + // We've read all the prepended response. + // Allow cancellation to proceed. + ReadingPrependedMessagesMRE.Set(); + + // User requested cancellation but it hasn't been performed yet. + // This might happen if the cancellation is requested while we're reading prepended responses + // because we shouldn't cancel them and otherwise might deadlock. + if (UserCancellationRequested && !PostgresCancellationPerformed) + PerformDelayedUserCancellation(); + } + catch (Exception e) + { + // Prepended queries should never fail. + // If they do, we're not even going to attempt to salvage the connector. + Break(e); + throw; } + } - PostgresException? error = null; + PostgresException? 
error = null; - try + try + { + while (true) { - connector.ReadBuffer.Timeout = TimeSpan.FromMilliseconds(connector.UserTimeout); - - while (true) + await ReadBuffer.Ensure(5, async, readingNotifications).ConfigureAwait(false); + var messageCode = (BackendMessageCode)ReadBuffer.ReadByte(); + ValidateBackendMessageCode(messageCode); + var len = ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself + + if ((messageCode == BackendMessageCode.DataRow && + dataRowLoadingMode != DataRowLoadingMode.NonSequential) || + messageCode == BackendMessageCode.CopyData) { - await connector.ReadBuffer.Ensure(5, async, readingNotifications); - var messageCode = (BackendMessageCode)connector.ReadBuffer.ReadByte(); - PGUtil.ValidateBackendMessageCode(messageCode); - var len = connector.ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself - - if ((messageCode == BackendMessageCode.DataRow && - dataRowLoadingMode != DataRowLoadingMode.NonSequential) || - messageCode == BackendMessageCode.CopyData) + if (dataRowLoadingMode == DataRowLoadingMode.Skip) { - if (dataRowLoadingMode == DataRowLoadingMode.Skip) - { - await connector.ReadBuffer.Skip(len, async); - continue; - } + await ReadBuffer.Skip(async, len).ConfigureAwait(false); + continue; } - else if (len > connector.ReadBuffer.ReadBytesLeft) + } + else if (len > ReadBuffer.ReadBytesLeft) + { + if (len > ReadBuffer.Size) { - if (len > connector.ReadBuffer.Size) - { - var oversizeBuffer = connector.ReadBuffer.AllocateOversize(len); + var oversizeBuffer = ReadBuffer.AllocateOversize(len); - if (connector._origReadBuffer == null) - connector._origReadBuffer = connector.ReadBuffer; - else - connector.ReadBuffer.Dispose(); - - connector.ReadBuffer = oversizeBuffer; - } + if (_origReadBuffer == null) + _origReadBuffer = ReadBuffer; + else + ReadBuffer.Dispose(); - await connector.ReadBuffer.Ensure(len, async); + ReadBuffer = oversizeBuffer; } - var msg = connector.ParseServerMessage(connector.ReadBuffer, messageCode, 
len, isReadingPrependedMessage); + await ReadBuffer.Ensure(len, async).ConfigureAwait(false); + } + + var msg = ParseServerMessage(ReadBuffer, messageCode, len, isReadingPrependedMessage); - switch (messageCode) + switch (messageCode) + { + case BackendMessageCode.ErrorResponse: + Debug.Assert(msg == null); + + // An ErrorResponse is (almost) always followed by a ReadyForQuery. Save the error + // and throw it as an exception when the ReadyForQuery is received (next). + error = PostgresException.Load( + ReadBuffer, + Settings.IncludeErrorDetail, + LoggingConfiguration.ExceptionLogger); + + if (State == ConnectorState.Connecting) { - case BackendMessageCode.ErrorResponse: - Debug.Assert(msg == null); - - // An ErrorResponse is (almost) always followed by a ReadyForQuery. Save the error - // and throw it as an exception when the ReadyForQuery is received (next). - error = PostgresException.Load( - connector.ReadBuffer, - connector.Settings.IncludeErrorDetail, - connector.LoggingConfiguration.ExceptionLogger); - - if (connector.State == ConnectorState.Connecting) - { - // During the startup/authentication phase, an ErrorResponse isn't followed by - // an RFQ. Instead, the server closes the connection immediately - throw error; - } - - if (PostgresErrorCodes.IsCriticalFailure(error, clusterError: false)) - { - // Consider the connection dead - throw connector.Break(error); - } + // During the startup/authentication phase, an ErrorResponse isn't followed by + // an RFQ. Instead, the server closes the connection immediately + throw error; + } - continue; + if (PostgresErrorCodes.IsCriticalFailure(error, clusterError: false)) + { + // Consider the connection dead + throw Break(error); + } + + continue; - case BackendMessageCode.ReadyForQuery: - if (error != null) - { - NpgsqlEventSource.Log.CommandFailed(); - throw error; - } - - break; - - // Asynchronous messages which can come anytime, they have already been handled - // in ParseServerMessage. Read the next message. 
- case BackendMessageCode.NoticeResponse: - case BackendMessageCode.NotificationResponse: - case BackendMessageCode.ParameterStatus: - Debug.Assert(msg == null); - if (!readingNotifications) - continue; - return null; + case BackendMessageCode.ReadyForQuery: + if (error != null) + { + NpgsqlEventSource.Log.CommandFailed(); + DataSource.MetricsReporter.ReportCommandFailed(); + throw error; } - Debug.Assert(msg != null, "Message is null for code: " + messageCode); - return msg; - } - } - catch (PostgresException e) - { - // TODO: move it up the stack, like #3126 did (relevant for non-command-execution scenarios, like COPY) - if (connector.CurrentReader is null) - connector.EndUserAction(); + break; - if (e.SqlState == PostgresErrorCodes.QueryCanceled && connector.PostgresCancellationPerformed) - { - // The query could be canceled because of a user cancellation or a timeout - raise the proper exception. - // If _postgresCancellationPerformed is false, this is an unsolicited cancellation - - // just bubble up thePostgresException. - throw connector.UserCancellationRequested - ? new OperationCanceledException("Query was cancelled", e, connector.UserCancellationToken) - : new NpgsqlException("Exception while reading from stream", - new TimeoutException("Timeout during reading attempt")); + // Asynchronous messages which can come anytime, they have already been handled + // in ParseServerMessage. Read the next message. + case BackendMessageCode.NoticeResponse: + case BackendMessageCode.NotificationResponse: + case BackendMessageCode.ParameterStatus: + Debug.Assert(msg == null); + if (!readingNotifications) + continue; + return null; } - throw; + Debug.Assert(msg != null, "Message is null for code: " + messageCode); + + // Rebase the cumulative buffer-end counter after any RFQ or in between potentially long-running operations. + // Just in case we'll hit that 15 exbibyte limit of a signed long... 
+ if (messageCode is BackendMessageCode.ReadyForQuery or BackendMessageCode.CopyData or BackendMessageCode.NotificationResponse) + ReadBuffer.RebaseBufferEndPosition(); + + return msg; } - catch (NpgsqlException) + } + catch (PostgresException e) + { + if (e.SqlState == PostgresErrorCodes.QueryCanceled && PostgresCancellationPerformed) { - // An ErrorResponse isn't followed by ReadyForQuery - if (error != null) - ExceptionDispatchInfo.Capture(error).Throw(); - throw; + // The query could be canceled because of a user cancellation or a timeout - raise the proper exception. + // If _postgresCancellationPerformed is false, this is an unsolicited cancellation - + // just bubble up thePostgresException. + throw UserCancellationRequested + ? new OperationCanceledException("Query was cancelled", e, UserCancellationToken) + : new NpgsqlException("Exception while reading from stream", + new TimeoutException("Timeout during reading attempt")); } + + throw; + } + catch (NpgsqlException) + { + // An ErrorResponse isn't followed by ReadyForQuery + if (error != null) + ExceptionDispatchInfo.Capture(error).Throw(); + throw; } } - internal IBackendMessage? ParseServerMessage(NpgsqlReadBuffer buf, BackendMessageCode code, int len, bool isPrependedMessage) + internal IBackendMessage? ParseResultSetMessage(NpgsqlReadBuffer buf, BackendMessageCode code, int len, bool handleCallbacks = false) + => code switch + { + BackendMessageCode.DataRow => _dataRowMessage.Load(len), + BackendMessageCode.CommandComplete => _commandCompleteMessage.Load(buf, len), + _ => ParseServerMessage(buf, code, len, false, handleCallbacks) + }; + + internal IBackendMessage? 
ParseServerMessage(NpgsqlReadBuffer buf, BackendMessageCode code, int len, bool isPrependedMessage, bool handleCallbacks = true) { switch (code) { case BackendMessageCode.RowDescription: - return _rowDescriptionMessage.Load(buf, TypeMapper); + return _rowDescriptionMessage.Load(buf, SerializerOptions); case BackendMessageCode.DataRow: return _dataRowMessage.Load(len); case BackendMessageCode.CommandComplete: @@ -1429,27 +1675,33 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d ReadParameterStatus(buf.GetNullTerminatedBytes(), buf.GetNullTerminatedBytes()); return null; case BackendMessageCode.NoticeResponse: - var notice = PostgresNotice.Load(buf, Settings.IncludeErrorDetail, LoggingConfiguration.ExceptionLogger); - LogMessages.ReceivedNotice(ConnectionLogger, notice.MessageText, Id); - Connection?.OnNotice(notice); + if (handleCallbacks) + { + var notice = PostgresNotice.Load(buf, Settings.IncludeErrorDetail, LoggingConfiguration.ExceptionLogger); + LogMessages.ReceivedNotice(ConnectionLogger, notice.MessageText, Id); + Connection?.OnNotice(notice); + } return null; case BackendMessageCode.NotificationResponse: - Connection?.OnNotification(new NpgsqlNotificationEventArgs(buf)); + if (handleCallbacks) + { + Connection?.OnNotification(new NpgsqlNotificationEventArgs(buf)); + } return null; case BackendMessageCode.AuthenticationRequest: var authType = (AuthenticationRequestType)buf.ReadInt32(); return authType switch { - AuthenticationRequestType.AuthenticationOk => (AuthenticationRequestMessage)AuthenticationOkMessage.Instance, - AuthenticationRequestType.AuthenticationCleartextPassword => AuthenticationCleartextPasswordMessage.Instance, - AuthenticationRequestType.AuthenticationMD5Password => AuthenticationMD5PasswordMessage.Load(buf), - AuthenticationRequestType.AuthenticationGSS => AuthenticationGSSMessage.Instance, - AuthenticationRequestType.AuthenticationSSPI => AuthenticationSSPIMessage.Instance, - 
AuthenticationRequestType.AuthenticationGSSContinue => AuthenticationGSSContinueMessage.Load(buf, len), - AuthenticationRequestType.AuthenticationSASL => new AuthenticationSASLMessage(buf), - AuthenticationRequestType.AuthenticationSASLContinue => new AuthenticationSASLContinueMessage(buf, len - 4), - AuthenticationRequestType.AuthenticationSASLFinal => new AuthenticationSASLFinalMessage(buf, len - 4), + AuthenticationRequestType.Ok => AuthenticationOkMessage.Instance, + AuthenticationRequestType.CleartextPassword => AuthenticationCleartextPasswordMessage.Instance, + AuthenticationRequestType.MD5Password => AuthenticationMD5PasswordMessage.Load(buf), + AuthenticationRequestType.GSS => AuthenticationGSSMessage.Instance, + AuthenticationRequestType.SSPI => AuthenticationSSPIMessage.Instance, + AuthenticationRequestType.GSSContinue => AuthenticationGSSContinueMessage.Load(buf, len), + AuthenticationRequestType.SASL => new AuthenticationSASLMessage(buf), + AuthenticationRequestType.SASLContinue => new AuthenticationSASLContinueMessage(buf, len - 4), + AuthenticationRequestType.SASLFinal => new AuthenticationSASLFinalMessage(buf, len - 4), _ => throw new NotSupportedException($"Authentication method not supported (Received: {authType})") }; @@ -1468,17 +1720,15 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d case BackendMessageCode.CopyDone: return CopyDoneMessage.Instance; - case BackendMessageCode.PortalSuspended: - throw new NpgsqlException("Unimplemented message: " + code); case BackendMessageCode.ErrorResponse: return null; + case BackendMessageCode.PortalSuspended: case BackendMessageCode.FunctionCallResponse: // We don't use the obsolete function call protocol - throw new NpgsqlException("Unexpected backend message: " + code); - default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {code} of enum {nameof(BackendMessageCode)}. 
Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {code} of enum {nameof(BackendMessageCode)}. Please file a bug."); + return null; } } @@ -1512,14 +1762,23 @@ internal Task Rollback(bool async, CancellationToken cancellationToken = default } internal bool InTransaction - => TransactionStatus switch + { + get { - TransactionStatus.Idle => false, - TransactionStatus.Pending => true, - TransactionStatus.InTransactionBlock => true, - TransactionStatus.InFailedTransactionBlock => true, - _ => throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. Please file a bug.") - }; + switch (TransactionStatus) + { + case TransactionStatus.Idle: + return false; + case TransactionStatus.Pending: + case TransactionStatus.InTransactionBlock: + case TransactionStatus.InFailedTransactionBlock: + return true; + default: + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(TransactionStatus)}. Please file a bug.", TransactionStatus); + return false; + } + } + } /// /// Handles a new transaction indicator received on a ReadyForQuery message @@ -1534,23 +1793,15 @@ void ProcessNewTransactionStatus(TransactionStatus newStatus) switch (newStatus) { case TransactionStatus.Idle: - break; case TransactionStatus.InTransactionBlock: case TransactionStatus.InFailedTransactionBlock: - // In multiplexing mode, we can't support transaction in SQL: the connector must be removed from the - // writable connectors list, otherwise other commands may get written to it. So the user must tell us - // about the transaction via BeginTransaction. 
- if (Connection is null) - { - Debug.Assert(Settings.Multiplexing); - throw new NotSupportedException("In multiplexing mode, transactions must be started with BeginTransaction"); - } - break; + return; case TransactionStatus.Pending: - throw new Exception($"Internal Npgsql bug: invalid TransactionStatus {nameof(TransactionStatus.Pending)} received, should be frontend-only"); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: invalid TransactionStatus {nameof(TransactionStatus.Pending)} received, should be frontend-only"); + return; default: - throw new InvalidOperationException( - $"Internal Npgsql bug: unexpected value {newStatus} of enum {nameof(TransactionStatus)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(TransactionStatus)}. Please file a bug.", newStatus); + return; } } @@ -1567,15 +1818,20 @@ internal void ClearTransaction(Exception? disposeReason = null) /// /// Returns whether SSL is being used for the connection /// - internal bool IsSecure { get; private set; } + internal bool IsSslEncrypted { get; private set; } + + /// + /// Returns whether GSS is being used for the connection + /// + internal bool IsGssEncrypted { get; private set; } /// - /// Returns whether SCRAM-SHA256 is being user for the connection + /// Returns whether SCRAM-SHA256 is being used for the connection /// internal bool IsScram { get; private set; } /// - /// Returns whether SCRAM-SHA256-PLUS is being user for the connection + /// Returns whether SCRAM-SHA256-PLUS is being used for the connection /// internal bool IsScramPlus { get; private set; } @@ -1591,19 +1847,14 @@ internal void ClearTransaction(Exception? 
disposeReason = null) (sender, certificate, chain, sslPolicyErrors) => true; - static RemoteCertificateValidationCallback SslRootValidation(string certRootPath, bool verifyFull) => - (sender, certificate, chain, sslPolicyErrors) => + static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, string? certRootPath, X509Certificate2Collection? caCertificates) + => (_, certificate, chain, sslPolicyErrors) => { if (certificate is null || chain is null) return false; - // No errors here - no reason to check further - if (sslPolicyErrors == SslPolicyErrors.None) - return true; - - // That's VerifyCA check and the only error is name mismatch - no reason to check further - if (!verifyFull && sslPolicyErrors == SslPolicyErrors.RemoteCertificateNameMismatch) - return true; + // Even if there was no error while validating, we have to check one more time with the provided certificate + // As this is the exact same behavior as libpq // That's VerifyFull check and we have name mismatch - no reason to check further if (verifyFull && sslPolicyErrors.HasFlag(SslPolicyErrors.RemoteCertificateNameMismatch)) @@ -1611,18 +1862,26 @@ static RemoteCertificateValidationCallback SslRootValidation(string certRootPath var certs = new X509Certificate2Collection(); -#if NET5_0_OR_GREATER - if (Path.GetExtension(certRootPath).ToUpperInvariant() != ".PFX") - certs.ImportFromPemFile(certRootPath); -#endif + if (certRootPath is null) + { + Debug.Assert(caCertificates is { Count: > 0 }); + certs.AddRange(caCertificates); + } + else + { + Debug.Assert(caCertificates is null or { Count: > 0 }); + if (Path.GetExtension(certRootPath).ToUpperInvariant() != ".PFX") + certs.ImportFromPemFile(certRootPath); - if (certs.Count == 0) - certs.Add(new X509Certificate2(certRootPath)); + if (certs.Count == 0) + { + // This is not a PEM certificate, probably PFX + certs.Add(X509CertificateLoader.LoadPkcs12FromFile(certRootPath, null)); + } + } -#if NET5_0_OR_GREATER 
chain.ChainPolicy.CustomTrustStore.AddRange(certs); chain.ChainPolicy.TrustMode = X509ChainTrustMode.CustomRootTrust; -#endif chain.ChainPolicy.ExtraStore.AddRange(certs); @@ -1633,52 +1892,74 @@ static RemoteCertificateValidationCallback SslRootValidation(string certRootPath #region Cancel - internal void PerformUserCancellation() + internal void ResetCancellation() + { + // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) + lock (CancelLock) + { + if (PendingPrependedResponses > 0) + ReadingPrependedMessagesMRE.Reset(); + Debug.Assert(ReadingPrependedMessagesMRE.IsSet || PendingPrependedResponses > 0); + } + } + + internal void PerformImmediateUserCancellation() { var connection = Connection; - if (connection is null || connection.ConnectorBindingScope == ConnectorBindingScope.Reader) + if (connection is null || UserCancellationRequested) return; - // There's a subtle race condition where cancellation may be happening just as Break is called. Break takes the connector lock, and - // then ends the user action; this disposes the cancellation token registration, which waits until the cancellation callback - // completes. But the callback needs to take the connector lock below, which led to a deadlock (#4654). - // As a result, Break takes CancelLock, and we abort the cancellation attempt immediately if we can't get it here. - if (!Monitor.TryEnter(CancelLock)) - return; + // Take the lock first to make sure there is no concurrent Break. + // We should be safe to take it as Break only take it to set the state. + lock (SyncObj) + { + // The connector is dead, exit gracefully. + if (!IsConnected) + return; + // The connector is still alive, take the CancelLock before exiting SingleUseLock. + // If a break will happen after, it's going to wait for the cancellation to complete. + Monitor.Enter(CancelLock); + } try { + // Set the flag first before waiting on ReadingPrependedMessagesMRE. 
+ // That way we're making sure that in case we're racing with ReadingPrependedMessagesMRE.Set + // that it's going to read the new value of the flag and request cancellation _userCancellationRequested = true; - if (AttemptPostgresCancellation && SupportsPostgresCancellation) - { - var cancellationTimeout = Settings.CancellationTimeout; - if (PerformPostgresCancellation() && cancellationTimeout >= 0) - { - if (cancellationTimeout > 0) - { - lock (this) - { - if (!IsConnected) - return; - UserTimeout = cancellationTimeout; - ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - ReadBuffer.Cts.CancelAfter(cancellationTimeout); - } - } + // Check whether we've read all responses for the prepended queries + // as we can't gracefully handle their cancellation. + // We don't wait indefinitely to avoid deadlocks from synchronous CancellationToken.Register + // See #5032 + if (!ReadingPrependedMessagesMRE.Wait(0)) + return; - return; - } - } + PerformUserCancellationUnsynchronized(); + } + finally + { + Monitor.Exit(CancelLock); + } + } - lock (this) - { - if (!IsConnected) - return; - UserTimeout = -1; - ReadBuffer.Timeout = _cancelImmediatelyTimeout; - ReadBuffer.Cts.Cancel(); - } + void PerformDelayedUserCancellation() + { + // Take the lock first to make sure there is no concurrent Break. + // We should be safe to take it as Break only take it to set the state. + lock (SyncObj) + { + // The connector is dead, exit gracefully. + if (!IsConnected) + return; + // The connector is still alive, take the CancelLock before exiting SingleUseLock. + // If a break will happen after, it's going to wait for the cancellation to complete. 
+ Monitor.Enter(CancelLock); + } + + try + { + PerformUserCancellationUnsynchronized(); } finally { @@ -1686,6 +1967,29 @@ internal void PerformUserCancellation() } } + void PerformUserCancellationUnsynchronized() + { + if (AttemptPostgresCancellation && SupportsPostgresCancellation) + { + var cancellationTimeout = Settings.CancellationTimeout; + if (PerformPostgresCancellation() && cancellationTimeout >= 0) + { + // TODO: according to docs, we treat 0 timeout as infinite, yet we do not change the actual value + // We should revisit this here and in NpgsqlReadBuffer + if (cancellationTimeout > 0) + { + ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + ReadBuffer.Cts.CancelAfter(cancellationTimeout); + } + + return; + } + } + + ReadBuffer.Timeout = _cancelImmediatelyTimeout; + ReadBuffer.Cts.Cancel(); + } + /// /// Creates another connector and sends a cancel request through it for this connector. This method never throws, but returns /// whether the cancellation attempt failed. @@ -1735,11 +2039,36 @@ internal bool PerformPostgresCancellation() void DoCancelRequest(int backendProcessId, int backendSecretKey) { Debug.Assert(State == ConnectorState.Closed); + var gssEncMode = GetGssEncMode(Settings); try { - RawOpen(Settings.SslMode, new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)), false, CancellationToken.None) - .GetAwaiter().GetResult(); + try + { + var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); + RawOpen(timeout, false, + CancellationToken.None) + .GetAwaiter().GetResult(); + SetupEncryption(Settings.SslMode, gssEncMode, timeout, false, + CancellationToken.None). 
+ GetAwaiter().GetResult(); + } + catch (Exception e) when (gssEncMode == GssEncryptionMode.Prefer) + { + ConnectionLogger.LogTrace(e, "Error while opening physical connection with GSS encryption, retrying without it"); + Cleanup(); + + // If we hit an error with gss encryption + // Retry again without it + var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); + RawOpen(timeout, false, + CancellationToken.None) + .GetAwaiter().GetResult(); + SetupEncryption(Settings.SslMode, GssEncryptionMode.Disable, timeout, false, + CancellationToken.None). + GetAwaiter().GetResult(); + } + WriteCancelRequest(backendProcessId, backendSecretKey); Flush(); @@ -1753,8 +2082,7 @@ void DoCancelRequest(int backendProcessId, int backendSecretKey) } finally { - lock (this) - FullCleanup(); + FullCleanup(); } } @@ -1769,7 +2097,7 @@ internal CancellationTokenRegistration StartCancellableOperation( AttemptPostgresCancellation = attemptPgCancellation; return _cancellationTokenRegistration = - cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformUserCancellation(), this); + cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformImmediateUserCancellation(), this); } /// @@ -1792,15 +2120,36 @@ internal CancellationTokenRegistration StartCancellableOperation( /// PostgreSQL cancellation will be skipped and client-socket cancellation will occur immediately. 
/// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal CancellationTokenRegistration StartNestedCancellableOperation( + internal NestedCancellableScope StartNestedCancellableOperation( CancellationToken cancellationToken = default, bool attemptPgCancellation = true) { + var currentUserCancellationToken = UserCancellationToken; UserCancellationToken = cancellationToken; + var currentAttemptPostgresCancellation = AttemptPostgresCancellation; AttemptPostgresCancellation = attemptPgCancellation; - return _cancellationTokenRegistration = - cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformUserCancellation(), this); + var registration = cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformImmediateUserCancellation(), this); + + return new(this, registration, currentUserCancellationToken, currentAttemptPostgresCancellation); + } + + internal readonly struct NestedCancellableScope( + NpgsqlConnector connector, + CancellationTokenRegistration registration, + CancellationToken previousCancellationToken, + bool previousAttemptPostgresCancellation) + : IDisposable + { + public void Dispose() + { + if (connector is null) + return; + + connector.UserCancellationToken = previousCancellationToken; + connector.AttemptPostgresCancellation = previousAttemptPostgresCancellation; + registration.Dispose(); + } } #endregion Cancel @@ -1817,7 +2166,7 @@ internal async Task CloseOngoingOperations(bool async) var copyOperation = CurrentCopyOperation; if (reader != null) - await reader.Close(connectionClosing: true, async, isDisposing: false); + await reader.Close(async, connectionClosing: true, isDisposing: false).ConfigureAwait(false); else if (copyOperation != null) { // TODO: There's probably a race condition as the COPY operation may finish on its own during the next few lines @@ -1827,12 +2176,12 @@ internal async Task CloseOngoingOperations(bool async) // therefore vulnerable to the race condition in #615. 
if (copyOperation is NpgsqlBinaryImporter || copyOperation is NpgsqlCopyTextWriter || - copyOperation is NpgsqlRawCopyStream rawCopyStream && rawCopyStream.CanWrite) + copyOperation is NpgsqlRawCopyStream { CanWrite: true }) { try { if (async) - await copyOperation.CancelAsync(); + await copyOperation.CancelAsync().ConfigureAwait(false); else copyOperation.Cancel(); } @@ -1845,7 +2194,7 @@ copyOperation is NpgsqlCopyTextWriter || try { if (async) - await copyOperation.DisposeAsync(); + await copyOperation.DisposeAsync().ConfigureAwait(false); else copyOperation.Dispose(); } @@ -1860,7 +2209,7 @@ copyOperation is NpgsqlCopyTextWriter || // very unlikely to block (plus locking would need to be worked out) internal void Close() { - lock (this) + lock (SyncObj) { if (IsReady) { @@ -1889,19 +2238,14 @@ internal void Close() } State = ConnectorState.Closed; - FullCleanup(); - LogMessages.ClosedPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString, Id); } - } - internal bool TryRemovePendingEnlistedConnector(Transaction transaction) - => DataSource.TryRemovePendingEnlistedConnector(this, transaction); + FullCleanup(); + LogMessages.ClosedPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString, Id); + } internal void Return() => DataSource.Return(this); - /// - public void Dispose() => Close(); - /// /// Called when an unexpected message has been received during an action. Breaks the /// connector and returns the appropriate message. @@ -1915,91 +2259,108 @@ internal Exception UnexpectedMessageReceived(BackendMessageCode received) /// Note that fatal errors during the Open phase do *not* pass through here. /// /// The exception that caused the break. + /// Whether we treat host as down, even if we're still connecting to PostgreSQL instance. /// The exception given in for chaining calls. 
- internal Exception Break(Exception reason) + internal Exception Break(Exception reason, bool markHostAsOfflineOnConnecting = false) { Debug.Assert(!IsClosed); - // See PerformUserCancellation on why we take CancelLock - lock (CancelLock) - lock (this) - { - if (State == ConnectorState.Broken) - return reason; + Monitor.Enter(SyncObj); - // Note we only set the cluster to offline and clear the pool if the connection is being broken (we're in this method), - // *and* the exception indicates that the PG cluster really is down; the latter includes any IO/timeout issue, - // but does not include e.g. authentication failure or timeouts with disabled cancellation. - if (reason is NpgsqlException { IsTransient: true } ne && - (ne.InnerException is not TimeoutException || Settings.CancellationTimeout != -1) || - reason is PostgresException pe && PostgresErrorCodes.IsCriticalFailure(pe)) - { - DataSource.UpdateDatabaseState(DatabaseState.Offline, DateTime.UtcNow, Settings.HostRecheckSecondsTranslated); - DataSource.Clear(); - } + var state = State; + if (state == ConnectorState.Broken) + { + // We're already broken. + // Exit SingleUseLock to unblock other threads (like cancellation). + Monitor.Exit(SyncObj); + // Wait for the break to complete before going forward. + lock (CleanupLock) { } + return reason; + } + try + { LogMessages.BreakingConnection(ConnectionLogger, Id, reason); // Note that we may be reading and writing from the same connector concurrently, so safely set // the original reason for the break before actually closing the socket etc. Interlocked.CompareExchange(ref _breakReason, reason, null); State = ConnectorState.Broken; + // Take the CleanupLock while in SingleUseLock to make sure concurrent Break doesn't take it first. + Monitor.Enter(CleanupLock); + } + finally + { + // Unblock other threads (like cancellation) to proceed and exit gracefully. 
+ Monitor.Exit(SyncObj); + } - var connection = Connection; + try + { + // Make sure there is no concurrent cancellation in process + lock (CancelLock) + { + // Note we only set the cluster to offline and clear the pool if the connection is being broken (we're in this method), + // *and* the exception indicates that the PG cluster really is down; the latter includes any IO/timeout issue, + // but does not include e.g. authentication failure or timeouts with disabled cancellation. + // We also do not treat host as down if we're still connecting, as we might retry without GSS/TLS + if (reason is NpgsqlException { IsTransient: true } ne && + (state != ConnectorState.Connecting || markHostAsOfflineOnConnecting) && + (ne.InnerException is not TimeoutException || Settings.CancellationTimeout != -1) || + reason is PostgresException pe && PostgresErrorCodes.IsCriticalFailure(pe)) + { + DataSource.UpdateDatabaseState(DatabaseState.Offline, DateTime.UtcNow, Settings.HostRecheckSecondsTranslated); + DataSource.Clear(); + } - FullCleanup(); + var connection = Connection; - if (connection is not null) - { - var closeLockTaken = connection.TakeCloseLock(); - Debug.Assert(closeLockTaken); - if (Settings.ReplicationMode == ReplicationMode.Off) + Cleanup(); + + if (connection is not null) { - // When a connector is broken, we immediately "return" it to the pool (i.e. update the pool state so reflect the - // connector no longer being open). Upper layers such as EF may check DbConnection.ConnectionState, and only close if - // it's closed; so we can't set the state to Closed and expect the user to still close (in order to return to the pool). - // On the other hand leaving the state Open could indicate to the user that the connection is functional. 
- // (see https://github.com/npgsql/npgsql/issues/3705#issuecomment-839908772) - Connection = null; - if (connection.ConnectorBindingScope != ConnectorBindingScope.None) + var closeLockTaken = connection.TakeCloseLock(); + Debug.Assert(closeLockTaken); + if (Settings.ReplicationMode == ReplicationMode.Off) + { + // When a connector is broken, we immediately "return" it to the pool (i.e. update the pool state so reflect the + // connector no longer being open). Upper layers such as EF may check DbConnection.ConnectionState, and only close if + // it's closed; so we can't set the state to Closed and expect the user to still close (in order to return to the pool). + // On the other hand leaving the state Open could indicate to the user that the connection is functional. + // (see https://github.com/npgsql/npgsql/issues/3705#issuecomment-839908772) + Connection = null; Return(); - connection.EnlistedTransaction = null; - connection.Connector = null; - connection.ConnectorBindingScope = ConnectorBindingScope.None; + connection.EnlistedTransaction = null; + connection.Connector = null; + } + + connection.FullState = ConnectionState.Broken; + connection.ReleaseCloseLock(); } - connection.FullState = ConnectionState.Broken; - connection.ReleaseCloseLock(); + return reason; } - - return reason; + } + finally + { + Monitor.Exit(CleanupLock); } } - + void FullCleanup() { - Debug.Assert(Monitor.IsEntered(this)); - - if (Settings.Multiplexing) + lock (CleanupLock) { - FlagAsNotWritableForMultiplexing(); - - // Note that in multiplexing, this could be called from the read loop, while the write loop is - // writing into the channel. To make sure this race condition isn't a problem, the channel currently - // isn't set up with SingleWriter (since at this point it doesn't do anything). 
- CommandsInFlightWriter!.Complete(); - - // The connector's read loop has a continuation to observe and log any exception coming out - // (see Open) - } + ConnectionLogger.LogTrace("Cleaning up connector", Id); + Cleanup(); - ConnectionLogger.LogTrace("Cleaning up connector", Id); - Cleanup(); + if (_isKeepAliveEnabled) + { + _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); + _keepAliveTimer.Dispose(); + } - if (_isKeepAliveEnabled) - { - _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); - _keepAliveTimer.Dispose(); + ReadingPrependedMessagesMRE.Dispose(); } } @@ -2007,10 +2368,39 @@ void FullCleanup() /// Closes the socket and cleans up client-side resources associated with this connector. /// /// - /// This method doesn't actually perform any meaningful I/O, and therefore is sync-only. + /// This method doesn't actually perform any meaningful I/O (except sending TLS alert), and therefore is sync-only. /// void Cleanup() { + var sslStream = _stream as SslStream; + if (sslStream is not null) + { + try + { + // Send close_notify TLS alert to correctly close connection on postgres's side + sslStream.ShutdownAsync().GetAwaiter().GetResult(); + // Theoretically we should do a 0 read here to receive server's close_notify alert + // But overall it doesn't look like it makes much of a difference + } + catch + { + // ignored + } + } + + // After we access SslStream.RemoteCertificate (like for SASLSha256Plus) + // SslStream will no longer dispose it for us automatically + // Which is why we have to do it ourselves before disposing the stream + // As otherwise accessing RemoteCertificate will throw an exception + try + { + sslStream?.RemoteCertificate?.Dispose(); + } + catch + { + // ignored + } + try { _stream?.Dispose(); @@ -2064,10 +2454,19 @@ void Cleanup() Connection = null; PostgresParameters.Clear(); _currentCommand = null; + + _certificates?.ForEach(x => x.Dispose()); + _certificates = null; } + 
[MemberNotNull(nameof(_resetWithoutDeallocateMessage))] void GenerateResetMessage() { + // Generate a reset message that resets connection state without using DISCARD ALL. + // This is used in two scenarios: + // 1. When closing a pooled connection that has prepared statements (DISCARD ALL would deallocate them) + // 2. When closing a connection within an enlisted System.Transactions transaction (DISCARD ALL cannot + // run inside a transaction block, but its component commands can) var sb = new StringBuilder("SET SESSION AUTHORIZATION DEFAULT;RESET ALL;"); _resetWithoutDeallocateResponseCount = 2; if (DatabaseInfo.SupportsCloseAll) @@ -2109,8 +2508,6 @@ void GenerateResetMessage() /// internal async Task Reset(bool async) { - bool endBindingScope; - // We start user action in case a keeplive happens concurrently, or a concurrent user command (bug) using (StartUserAction(attemptPgCancellation: false)) { @@ -2127,24 +2524,21 @@ internal async Task Reset(bool async) switch (TransactionStatus) { case TransactionStatus.Idle: - // There is an undisposed transaction on multiplexing connection - endBindingScope = Connection?.ConnectorBindingScope == ConnectorBindingScope.Transaction; break; case TransactionStatus.Pending: // BeginTransaction() was called, but was left in the write buffer and not yet sent to server. // Just clear the transaction state. ProcessNewTransactionStatus(TransactionStatus.Idle); ClearTransaction(); - endBindingScope = true; break; case TransactionStatus.InTransactionBlock: case TransactionStatus.InFailedTransactionBlock: - await Rollback(async); + await Rollback(async).ConfigureAwait(false); ClearTransaction(); - endBindingScope = true; break; default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. 
Please file a bug."); + return; } if (_sendResetOnClose) @@ -2165,13 +2559,35 @@ internal async Task Reset(bool async) DataReader.UnbindIfNecessary(); } + } + + /// + /// Called when a pooled connection with an enlisted System.Transactions transaction is closed. + /// Since we're inside a transaction block, we cannot send DISCARD ALL; + /// we prepend a reset message that only includes commands that can safely run within a transaction. + /// + internal void ResetWithinEnlistedTransaction() + { + // We start user action in case a keeplive happens concurrently, or a concurrent user command (bug) + using var _ = StartUserAction(attemptPgCancellation: false); + + // Our buffer may contain unsent prepended messages, so clear it out. + WriteBuffer.Clear(); + PendingPrependedResponses = 0; - if (endBindingScope) + ResetReadBuffer(); + + if (_sendResetOnClose) { - // Connection is null if a connection enlisted in a TransactionScope was closed before the - // TransactionScope completed - the connector is still enlisted, but has no connection. 
- Connection?.EndBindingScope(ConnectorBindingScope.Transaction); + if (_resetWithoutDeallocateMessage is null) + { + GenerateResetMessage(); + } + + PrependInternalMessage(_resetWithoutDeallocateMessage, _resetWithoutDeallocateResponseCount); } + + DataReader.UnbindIfNecessary(); } /// @@ -2183,6 +2599,20 @@ void ResetReadBuffer() { if (_origReadBuffer != null) { + Debug.Assert(_origReadBuffer.ReadBytesLeft == 0); + Debug.Assert(_origReadBuffer.ReadPosition == 0); + if (ReadBuffer.ReadBytesLeft > 0) + { + // There is still something in the buffer which we haven't read yet + // In most cases it's ParameterStatus which can be sent asynchronously + // If in some extreme case we have too much data left in the buffer to store in the original buffer + // we just leave the oversize buffer as is and will try again on next reset + if (ReadBuffer.ReadBytesLeft > _origReadBuffer.Size) + return; + + ReadBuffer.CopyTo(_origReadBuffer); + } + ReadBuffer.Dispose(); ReadBuffer = _origReadBuffer; _origReadBuffer = null; @@ -2233,44 +2663,17 @@ internal UserAction StartUserAction( CancellationToken cancellationToken = default, bool attemptPgCancellation = true) { - // If keepalive is enabled, we must protect state transitions with a SemaphoreSlim - // (which itself must be protected by a lock, since its dispose isn't thread-safe). + // If keepalive is enabled, we must protect state transitions with a lock. // This will make the keepalive abort safely if a user query is in progress, and make // the user query wait if a keepalive is in progress. - - // If keepalive isn't enabled, we don't use the semaphore and rely only on the connector's + // If keepalive isn't enabled, we don't use the lock and rely only on the connector's // state (updated via Interlocked.Exchange) to detect concurrent use, on a best-effort basis. - if (!_isKeepAliveEnabled) - return DoStartUserAction(newState, command); - - lock (this) - { - if (!IsConnected) - { - throw IsBroken - ? 
new NpgsqlException("The connection was previously broken because of the following exception", _breakReason) - : new NpgsqlException("The connection is closed"); - } - - // Disable keepalive, it will be restarted at the end of the user action - _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); - - try - { - // Check that the connector is ready. - return DoStartUserAction(newState, command); - } - catch (Exception ex) when (ex is not NpgsqlOperationInProgressException) - { - // We failed, but there is no current operation. - // As such, we re-enable the keepalive. - var keepAlive = Settings.KeepAlive * 1000; - _keepAliveTimer!.Change(keepAlive, keepAlive); - throw; - } - } + return _isKeepAliveEnabled + ? DoStartUserActionWithKeepAlive(newState, command, cancellationToken, attemptPgCancellation) + : DoStartUserAction(newState, command, cancellationToken, attemptPgCancellation); - UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) + UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command, + CancellationToken cancellationToken, bool attemptPgCancellation) { switch (State) { @@ -2278,7 +2681,8 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) break; case ConnectorState.Closed: case ConnectorState.Broken: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + break; case ConnectorState.Executing: case ConnectorState.Fetching: case ConnectorState.Waiting: @@ -2286,11 +2690,14 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) case ConnectorState.Connecting: case ConnectorState.Copy: var currentCommand = _currentCommand; - throw currentCommand == null - ? 
new NpgsqlOperationInProgressException(State) - : new NpgsqlOperationInProgressException(currentCommand); + if (currentCommand is null) + ThrowHelper.ThrowNpgsqlOperationInProgressException(State); + else + ThrowHelper.ThrowNpgsqlOperationInProgressException(currentCommand); + break; default: - throw new ArgumentOutOfRangeException(nameof(State), State, "Invalid connector state: " + State); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(State), "Invalid connector state: {0}", State); + break; } Debug.Assert(IsReady); @@ -2303,13 +2710,46 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) StartCancellableOperation(cancellationToken, attemptPgCancellation); - // We reset the UserTimeout for every user action, so it wouldn't leak from the previous query or action + // We reset the ReadBuffer.Timeout and WriteBuffer.Timeout for every user action, so it wouldn't leak from the previous query or action // For example, we might have successfully cancelled the previous query (so the connection is not broken) - // But the next time, we call the Prepare, which doesn't set it's own timeout - UserTimeout = (command?.CommandTimeout ?? Settings.CommandTimeout) * 1000; + // But the next time, we call the Prepare, which doesn't set its own timeout + var timeoutSeconds = command?.CommandTimeout ?? Settings.CommandTimeout; + ReadBuffer.Timeout = WriteBuffer.Timeout = timeoutSeconds > 0 ? TimeSpan.FromSeconds(timeoutSeconds) : Timeout.InfiniteTimeSpan; return new UserAction(this); } + + UserAction DoStartUserActionWithKeepAlive(ConnectorState newState, NpgsqlCommand? 
command, + CancellationToken cancellationToken, bool attemptPgCancellation) + { + lock (SyncObj) + { + if (!IsConnected) + { + if (IsBroken) + ThrowHelper.ThrowNpgsqlException("The connection was previously broken because of the following exception", _breakReason); + else + ThrowHelper.ThrowNpgsqlException("The connection is closed"); + } + + // Disable keepalive, it will be restarted at the end of the user action + _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); + + try + { + // Check that the connector is ready. + return DoStartUserAction(newState, command, cancellationToken, attemptPgCancellation); + } + catch (Exception ex) when (ex is not NpgsqlOperationInProgressException) + { + // We failed, but there is no current operation. + // As such, we re-enable the keepalive. + var keepAlive = Settings.KeepAlive * 1000; + _keepAliveTimer!.Change(keepAlive, keepAlive); + throw; + } + } + } } internal void EndUserAction() @@ -2320,7 +2760,7 @@ internal void EndUserAction() if (_isKeepAliveEnabled) { - lock (this) + lock (SyncObj) { if (IsReady || !IsConnected) return; @@ -2358,14 +2798,10 @@ internal void EndUserAction() #region Keepalive -#pragma warning disable CA1801 // Review unused parameters void PerformKeepAlive(object? state) { Debug.Assert(_isKeepAliveEnabled); - - // SemaphoreSlim.Dispose() isn't thread-safe - it may be in progress so we shouldn't try to wait on it; - // we need a standard lock to protect it. - if (!Monitor.TryEnter(this)) + if (!Monitor.TryEnter(SyncObj)) return; try @@ -2376,9 +2812,8 @@ void PerformKeepAlive(object? 
state) LogMessages.SendingKeepalive(ConnectionLogger, Id); AttemptPostgresCancellation = false; - var timeout = InternalCommandTimeout; - WriteBuffer.Timeout = TimeSpan.FromSeconds(timeout); - UserTimeout = timeout; + var timeout = Math.Max(Settings.CommandTimeout, MinimumInternalCommandTimeout); + ReadBuffer.Timeout = WriteBuffer.Timeout = TimeSpan.FromSeconds(timeout); WriteSync(async: false).GetAwaiter().GetResult(); Flush(); SkipUntil(BackendMessageCode.ReadyForQuery); @@ -2398,10 +2833,9 @@ void PerformKeepAlive(object? state) } finally { - Monitor.Exit(this); + Monitor.Exit(SyncObj); } } -#pragma warning restore CA1801 // Review unused parameters #endregion @@ -2412,18 +2846,21 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel using var _ = StartUserAction(ConnectorState.Waiting, cancellationToken: cancellationToken, attemptPgCancellation: false); // We may have prepended messages in the connection's write buffer - these need to be flushed now. - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); var keepaliveMs = Settings.KeepAlive * 1000; + var isTimeoutInfinite = timeout <= 0; while (true) { cancellationToken.ThrowIfCancellationRequested(); - var timeoutForKeepalive = _isKeepAliveEnabled && (timeout <= 0 || keepaliveMs < timeout); - UserTimeout = timeoutForKeepalive ? keepaliveMs : timeout; + var timeoutForKeepalive = _isKeepAliveEnabled && (isTimeoutInfinite || keepaliveMs < timeout); + ReadBuffer.Timeout = timeoutForKeepalive + ? TimeSpan.FromMilliseconds(keepaliveMs) + : isTimeoutInfinite ? 
Timeout.InfiniteTimeSpan : TimeSpan.FromMilliseconds(timeout); try { - var msg = await ReadMessageWithNotifications(async); + var msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); if (msg != null) { throw Break( @@ -2439,9 +2876,9 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel LogMessages.SendingKeepalive(ConnectionLogger, Id); - var keepaliveTime = Stopwatch.StartNew(); - await WriteSync(async, cancellationToken); - await Flush(async, cancellationToken); + var keepaliveStartTimestamp = Stopwatch.GetTimestamp(); + await WriteSync(async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); var receivedNotification = false; var expectedMessageCode = BackendMessageCode.RowDescription; @@ -2452,13 +2889,14 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel try { - msg = await ReadMessageWithNotifications(async); + msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); } - catch (Exception e) when (e is OperationCanceledException || e is NpgsqlException npgEx && npgEx.InnerException is TimeoutException) + catch (Exception e) when (e is OperationCanceledException || e is NpgsqlException { InnerException: TimeoutException }) { // We're somewhere in the middle of a reading keepalive messages // Breaking the connection, as we've lost protocol sync - throw Break(e); + Break(e); + throw; } if (msg == null) @@ -2479,7 +2917,11 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel } if (timeout > 0) - timeout -= (keepaliveMs + (int)keepaliveTime.ElapsedMilliseconds); + { + timeout -= (keepaliveMs + (int)Stopwatch.GetElapsedTime(keepaliveStartTimestamp).TotalMilliseconds); + // Make sure we don't accidentally set -1 as a timeout (because it's infinite) + timeout = Math.Max(timeout, 0); + } } } @@ -2509,20 +2951,20 @@ internal async Task ExecuteInternalCommand(string query, bool async, Cancellatio { 
LogMessages.ExecutingInternalCommand(CommandLogger, query, Id); - await WriteQuery(query, async, cancellationToken); - await Flush(async, cancellationToken); - Expect(await ReadMessage(async), this); - Expect(await ReadMessage(async), this); + await WriteQuery(query, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); + Expect(await ReadMessage(async).ConfigureAwait(false), this); + Expect(await ReadMessage(async).ConfigureAwait(false), this); } internal async Task ExecuteInternalCommand(byte[] data, bool async, CancellationToken cancellationToken = default) { Debug.Assert(State != ConnectorState.Ready, "Forgot to start a user action..."); - await WritePregenerated(data, async, cancellationToken); - await Flush(async, cancellationToken); - Expect(await ReadMessage(async), this); - Expect(await ReadMessage(async), this); + await WritePregenerated(data, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); + Expect(await ReadMessage(async).ConfigureAwait(false), this); + Expect(await ReadMessage(async).ConfigureAwait(false), this); } #endregion @@ -2536,6 +2978,12 @@ internal async Task ExecuteInternalCommand(byte[] data, bool async, Cancellation /// A object. public NpgsqlCommand CreateCommand(string? cmdText = null) => new(cmdText, this); + /// + /// Creates and returns a object associated with the . + /// + /// A object. 
+ public NpgsqlBatch CreateBatch() => new NpgsqlBatch(this); + void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan incomingValue) { byte[] rawName; @@ -2543,7 +2991,7 @@ void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan inc for (var i = 0; i < _rawParameters.Count; i++) { - (var currentName, var currentValue) = _rawParameters[i]; + var (currentName, currentValue) = _rawParameters[i]; if (incomingName.SequenceEqual(currentName)) { if (incomingValue.SequenceEqual(currentValue)) @@ -2570,8 +3018,6 @@ void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan inc switch (name) { case "standard_conforming_strings": - if (value != "on" && Settings.Multiplexing) - throw Break(new NotSupportedException("standard_conforming_strings must be on with multiplexing")); UseConformingStrings = value == "on"; return; @@ -2606,6 +3052,27 @@ void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan inc return null; } + internal Activity? TraceCopyStart(string copyCommand, string operation) + { + Activity? activity = null; + if (NpgsqlActivitySource.IsEnabled) + { + var tracingOptions = DataSource.Configuration.TracingOptions; + + if (tracingOptions.CopyOperationFilter?.Invoke(copyCommand) ?? true) + { + var spanName = tracingOptions.CopyOperationSpanNameProvider?.Invoke(copyCommand); + activity = NpgsqlActivitySource.CopyStart(copyCommand, this, spanName, operation); + + if (activity != null) + { + tracingOptions.CopyOperationEnrichmentCallback?.Invoke(activity, copyCommand); + } + } + } + return activity; + } + #endregion Misc } @@ -2663,9 +3130,7 @@ enum ConnectorState Replication, } -#pragma warning disable CA1717 enum TransactionStatus : byte -#pragma warning restore CA1717 { /// /// Currently not in a transaction block @@ -2693,7 +3158,7 @@ enum TransactionStatus : byte /// /// Specifies how to load/parse DataRow messages as they're received from the backend. 
/// -internal enum DataRowLoadingMode +enum DataRowLoadingMode { /// /// Load DataRows in non-sequential mode @@ -2711,4 +3176,11 @@ internal enum DataRowLoadingMode Skip } +enum GssEncryptionResult +{ + GetCredentialFailure, + NegotiateFailure, + Success +} + #endregion diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index 09417eef21..0bd6dc3992 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -1,9 +1,8 @@ -using System; -using System.Collections.Concurrent; +using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Threading.Tasks; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Util; @@ -13,15 +12,16 @@ namespace Npgsql.Internal; /// Base class for implementations which provide information about PostgreSQL and PostgreSQL-like databases /// (e.g. type definitions, capabilities...). /// +[Experimental(NpgsqlDiagnostics.DatabaseInfoExperimental)] public abstract class NpgsqlDatabaseInfo { #region Fields - static volatile INpgsqlDatabaseInfoFactory[] Factories = new INpgsqlDatabaseInfoFactory[] - { + static volatile INpgsqlDatabaseInfoFactory[] Factories = + [ new PostgresMinimalDatabaseInfoFactory(), new PostgresDatabaseInfoFactory() - }; + ]; #endregion Fields @@ -116,13 +116,13 @@ public abstract class NpgsqlDatabaseInfo #region Types - readonly List _baseTypesMutable = new(); - readonly List _arrayTypesMutable = new(); - readonly List _rangeTypesMutable = new(); - readonly List _multirangeTypesMutable = new(); - readonly List _enumTypesMutable = new(); - readonly List _compositeTypesMutable = new(); - readonly List _domainTypesMutable = new(); + readonly List _baseTypesMutable = []; + readonly List _arrayTypesMutable = []; + readonly List _rangeTypesMutable = []; + readonly List _multirangeTypesMutable = []; + readonly List _enumTypesMutable = []; + readonly List 
_compositeTypesMutable = []; + readonly List _domainTypesMutable = []; internal IReadOnlyList BaseTypes => _baseTypesMutable; internal IReadOnlyList ArrayTypes => _arrayTypesMutable; @@ -138,7 +138,7 @@ public abstract class NpgsqlDatabaseInfo internal Dictionary ByOID { get; } = new(); /// - /// Indexes backend types by their PostgreSQL name, including namespace (e.g. pg_catalog.int4). + /// Indexes backend types by their PostgreSQL internal name, including namespace (e.g. pg_catalog.int4). /// Only used for enums and composites. /// internal Dictionary ByFullName { get; } = new(); @@ -179,10 +179,22 @@ private protected NpgsqlDatabaseInfo(string host, int port, string databaseName, Version = ParseServerVersion(serverVersion); } - public PostgresType GetPostgresTypeByName(string pgName) + internal PostgresType GetPostgresType(Oid oid) => GetPostgresType(oid.Value); + + public PostgresType GetPostgresType(uint oid) + => ByOID.TryGetValue(oid, out var pgType) + ? pgType + : throw new ArgumentException($"A PostgreSQL type with the oid '{oid}' was not found in the current database info"); + + internal PostgresType GetPostgresType(DataTypeName dataTypeName) + => ByFullName.TryGetValue(dataTypeName.Value, out var value) + ? value + : throw new ArgumentException($"A PostgreSQL type with the name '{dataTypeName}' was not found in the current database info"); + + public PostgresType GetPostgresType(string pgName) => TryGetPostgresTypeByName(pgName, out var pgType) ? pgType - : throw new ArgumentException($"A PostgreSQL type with the name '{pgName}' was not found in the database"); + : throw new ArgumentException($"A PostgreSQL type with the name '{pgName}' was not found in the current database info"); public bool TryGetPostgresTypeByName(string pgName, [NotNullWhen(true)] out PostgresType? 
pgType) { @@ -204,7 +216,11 @@ public bool TryGetPostgresTypeByName(string pgName, [NotNullWhen(true)] out Post if (ByFullName.TryGetValue($"pg_catalog.{pgName}", out pgType)) return true; - var ambiguousTypes = ByFullName.Keys.Where(n => n.EndsWith($".{pgName}", StringComparison.Ordinal)); + var ambiguousTypes = new List(); + foreach (var key in ByFullName.Keys) + if (key.EndsWith($".{pgName}", StringComparison.Ordinal)) + ambiguousTypes.Add(key); + throw new ArgumentException($"More than one PostgreSQL type was found with the name {pgName}, " + $"please specify a full name including schema: {string.Join(", ", ambiguousTypes)}"); } @@ -214,15 +230,20 @@ public bool TryGetPostgresTypeByName(string pgName, [NotNullWhen(true)] out Post internal void ProcessTypes() { + var unspecified = new PostgresBaseType(DataTypeName.Unspecified, Oid.Unspecified); + ByOID[Oid.Unspecified.Value] = unspecified; + ByFullName[unspecified.DataTypeName.Value] = unspecified; + ByName[unspecified.InternalName] = unspecified; + foreach (var type in GetTypes()) { ByOID[type.OID] = type; - ByFullName[type.FullName] = type; + ByFullName[type.DataTypeName.Value] = type; // If more than one type exists with the same partial name, we place a null value. // This allows us to detect this case later and force the user to use full names only. - ByName[type.Name] = ByName.ContainsKey(type.Name) - ? 
null - : type; + var typeInternalName = type.InternalName; + if (!ByName.TryAdd(typeInternalName, type)) + ByName[typeInternalName] = null; switch (type) { @@ -292,8 +313,7 @@ protected static Version ParseServerVersion(string value) /// public static void RegisterFactory(INpgsqlDatabaseInfoFactory factory) { - if (factory == null) - throw new ArgumentNullException(nameof(factory)); + ArgumentNullException.ThrowIfNull(factory); var factories = new INpgsqlDatabaseInfoFactory[Factories.Length + 1]; factories[0] = factory; @@ -305,7 +325,7 @@ internal static async Task Load(NpgsqlConnector conn, Npgsql { foreach (var factory in Factories) { - var dbInfo = await factory.Load(conn, timeout, async); + var dbInfo = await factory.Load(conn, timeout, async).ConfigureAwait(false); if (dbInfo != null) { dbInfo.ProcessTypes(); @@ -319,11 +339,31 @@ internal static async Task Load(NpgsqlConnector conn, Npgsql // For tests internal static void ResetFactories() - => Factories = new INpgsqlDatabaseInfoFactory[] - { + => Factories = + [ new PostgresMinimalDatabaseInfoFactory(), new PostgresDatabaseInfoFactory() - }; + ]; #endregion Factory management -} \ No newline at end of file + + internal Oid GetOid(PgTypeId pgTypeId, bool validate = false) + => pgTypeId.IsOid + ? validate ? GetPostgresType(pgTypeId.Oid).OID : pgTypeId.Oid + : GetPostgresType(pgTypeId.DataTypeName).OID; + + internal DataTypeName GetDataTypeName(PgTypeId pgTypeId, bool validate = false) + => pgTypeId.IsDataTypeName + ? validate ? GetPostgresType(pgTypeId.DataTypeName).DataTypeName : pgTypeId.DataTypeName + : GetPostgresType(pgTypeId.Oid).DataTypeName; + + internal PostgresType GetPostgresType(PgTypeId pgTypeId) + => pgTypeId.IsOid + ? GetPostgresType(pgTypeId.Oid.Value) + : GetPostgresType(pgTypeId.DataTypeName.Value); + + internal PostgresType? FindPostgresType(PgTypeId pgTypeId) + => pgTypeId.IsOid + ? ByOID.TryGetValue(pgTypeId.Oid.Value, out var pgType) ? 
pgType : null + : TryGetPostgresTypeByName(pgTypeId.DataTypeName.Value, out pgType) ? pgType : null; +} diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index 39ebad22a7..eeee079c86 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.IO; using System.Threading; @@ -6,33 +6,43 @@ namespace Npgsql.Internal; -public sealed partial class NpgsqlReadBuffer +sealed partial class NpgsqlReadBuffer { internal sealed class ColumnStream : Stream { readonly NpgsqlConnector _connector; readonly NpgsqlReadBuffer _buf; - int _start, _len, _read; + long _startPos; + int _start; + int _read; bool _canSeek; - readonly bool _startCancellableOperations; + bool _commandScoped; + bool _consumeOnDispose; + /// Does not throw ODE. + internal int CurrentLength { get; private set; } internal bool IsDisposed { get; private set; } - internal ColumnStream(NpgsqlConnector connector, bool startCancellableOperations = true) + internal ColumnStream(NpgsqlConnector connector) { _connector = connector; _buf = connector.ReadBuffer; - _startCancellableOperations = startCancellableOperations; IsDisposed = true; } - internal void Init(int len, bool canSeek) + internal void Init(int len, bool canSeek, bool commandScoped, bool consumeOnDispose = true) { Debug.Assert(!canSeek || _buf.ReadBytesLeft >= len, "Seekable stream constructed but not all data is in buffer (sequential)"); - _start = _buf.ReadPosition; - _len = len; - _read = 0; + _startPos = _buf.CumulativeReadPosition; + _canSeek = canSeek; + _start = canSeek ? 
_buf.ReadPosition : 0; + + CurrentLength = len; + _read = 0; + + _commandScoped = commandScoped; + _consumeOnDispose = consumeOnDispose; IsDisposed = false; } @@ -47,7 +57,7 @@ public override long Length get { CheckDisposed(); - return _len; + return CurrentLength; } } @@ -63,9 +73,8 @@ public override long Position } set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), "Non - negative number required."); - Seek(_start + value, SeekOrigin.Begin); + ArgumentOutOfRangeException.ThrowIfNegative(value); + Seek(value, SeekOrigin.Begin); } } @@ -75,8 +84,7 @@ public override long Seek(long offset, SeekOrigin origin) if (!_canSeek) throw new NotSupportedException(); - if (offset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(offset), "Stream length must be non-negative and less than 2^31 - 1 - origin."); + ArgumentOutOfRangeException.ThrowIfGreaterThan(offset, int.MaxValue); const string seekBeforeBegin = "An attempt was made to move the position before the beginning of the stream."; @@ -87,8 +95,9 @@ public override long Seek(long offset, SeekOrigin origin) var tempPosition = unchecked(_start + (int)offset); if (offset < 0 || tempPosition < _start) throw new IOException(seekBeforeBegin); - _buf.ReadPosition = _start; - return tempPosition; + _buf.ReadPosition = tempPosition; + _read = (int)offset; + return _read; } case SeekOrigin.Current: { @@ -96,15 +105,17 @@ public override long Seek(long offset, SeekOrigin origin) if (unchecked(_buf.ReadPosition + offset) < _start || tempPosition < _start) throw new IOException(seekBeforeBegin); _buf.ReadPosition = tempPosition; - return tempPosition; + _read += (int)offset; + return _read; } case SeekOrigin.End: { - var tempPosition = unchecked(_len + (int)offset); - if (unchecked(_len + offset) < _start || tempPosition < _start) + var tempPosition = unchecked(_start + CurrentLength + (int)offset); + if (unchecked(_start + CurrentLength + offset) < _start || tempPosition < _start) throw new 
IOException(seekBeforeBegin); _buf.ReadPosition = tempPosition; - return tempPosition; + _read = CurrentLength + (int)offset; + return _read; } default: throw new ArgumentOutOfRangeException(nameof(origin), "Invalid seek origin."); @@ -137,52 +148,38 @@ public override int Read(byte[] buffer, int offset, int count) public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateArguments(buffer, offset, count); - - using (NoSynchronizationContextScope.Enter()) - return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if NETSTANDARD2_0 - public int Read(Span span) -#else public override int Read(Span span) -#endif { CheckDisposed(); - var count = Math.Min(span.Length, _len - _read); + var count = Math.Min(span.Length, CurrentLength - _read); if (count == 0) return 0; - var read = _buf.Read(span.Slice(0, count)); + var read = _buf.Read(_commandScoped, span.Slice(0, count)); _read += read; return read; } -#if NETSTANDARD2_0 - public ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) -#else public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) -#endif { CheckDisposed(); - var count = Math.Min(buffer.Length, _len - _read); - - if (count == 0) - return new ValueTask(0); - - using (NoSynchronizationContextScope.Enter()) - return ReadLong(this, buffer.Slice(0, count), cancellationToken); + var count = Math.Min(buffer.Length, CurrentLength - _read); + return count == 0 ? new ValueTask(0) : ReadLong(this, buffer.Slice(0, count), cancellationToken); static async ValueTask ReadLong(ColumnStream stream, Memory buffer, CancellationToken cancellationToken = default) { - using var registration = stream._startCancellableOperations + using var registration = cancellationToken.CanBeCanceled ? 
stream._connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false) : default; - var read = await stream._buf.ReadAsync(buffer, cancellationToken); + + var read = await stream._buf.ReadAsync(stream._commandScoped, buffer, cancellationToken).ConfigureAwait(false); stream._read += read; return read; } @@ -192,50 +189,40 @@ public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); void CheckDisposed() - { - if (IsDisposed) - throw new ObjectDisposedException(null); - } + => ObjectDisposedException.ThrowIf(IsDisposed, this); protected override void Dispose(bool disposing) - => DisposeAsync(disposing, async: false).GetAwaiter().GetResult(); - -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else - public override ValueTask DisposeAsync() -#endif { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsync(disposing: true, async: true); + if (disposing) + DisposeCore(async: false).GetAwaiter().GetResult(); } - async ValueTask DisposeAsync(bool disposing, bool async) + public override ValueTask DisposeAsync() + => DisposeCore(async: true); + + async ValueTask DisposeCore(bool async) { - if (IsDisposed || !disposing) + if (IsDisposed) return; - var leftToSkip = _len - _read; - if (leftToSkip > 0) + if (_consumeOnDispose && !_connector.IsBroken) { - if (async) - await _buf.Skip(leftToSkip, async); - else - _buf.Skip(leftToSkip, async).GetAwaiter().GetResult(); + var pos = _buf.CumulativeReadPosition - _startPos; + var remaining = checked((int)(CurrentLength - pos)); + if (remaining > 0) + await _buf.Skip(async, remaining).ConfigureAwait(false); } + IsDisposed = true; } } static void ValidateArguments(byte[] buffer, int offset, int count) { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentOutOfRangeException(nameof(offset)); - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count)); + 
ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 50ef859a76..47e9b3515e 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; @@ -12,47 +12,34 @@ using Npgsql.Util; using static System.Threading.Timeout; -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member - namespace Npgsql.Internal; /// /// A buffer used by Npgsql to read data from the socket efficiently. /// Provides methods which decode different values types and tracks the current position. /// -public sealed partial class NpgsqlReadBuffer : IDisposable +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +sealed partial class NpgsqlReadBuffer : IDisposable { #region Fields and Properties public NpgsqlConnection Connection => Connector.Connection!; - internal readonly NpgsqlConnector Connector; - internal Stream Underlying { private get; set; } - readonly Socket? _underlyingSocket; - internal ResettableCancellationTokenSource Cts { get; } - - TimeSpan _preTranslatedTimeout = TimeSpan.Zero; + readonly MetricsReporter? 
_metricsReporter; /// /// Timeout for sync and async reads /// internal TimeSpan Timeout { - get => _preTranslatedTimeout; + get => Cts.Timeout; set { - if (_preTranslatedTimeout != value) + if (Cts.Timeout != value) { - _preTranslatedTimeout = value; - - if (value == TimeSpan.Zero) - value = InfiniteTimeSpan; - else if (value < TimeSpan.Zero) - value = TimeSpan.Zero; - Debug.Assert(_underlyingSocket != null); _underlyingSocket.ReceiveTimeout = (int)value.TotalMilliseconds; @@ -75,15 +62,24 @@ internal TimeSpan Timeout /// internal Encoding RelaxedTextEncoding { get; } - internal int ReadPosition { get; set; } - internal int ReadBytesLeft => FilledBytes - ReadPosition; + internal int ReadBytesLeft { get; private set; } + internal int ReadPosition + { + get => FilledBytes - ReadBytesLeft; + set => ReadBytesLeft = FilledBytes - value; + } + internal PgReader PgReader { get; } - internal readonly byte[] Buffer; - internal int FilledBytes; + // Tracks the absolute position of the end of the buffered window. + // Invariant: _bufferEndPosition == CumulativeReadPosition + ReadBytesLeft. + long _bufferEndPosition; // this will always fit at least one message. + internal long CumulativeReadPosition + => _bufferEndPosition - ReadBytesLeft; - ColumnStream? _columnStream; + internal readonly byte[] Buffer; + internal int FilledBytes { get; private set; } - PreparedTextReader? _preparedTextReader; + internal ReadOnlySpan Span => Buffer.AsSpan(ReadPosition, ReadBytesLeft); readonly bool _usePool; bool _disposed; @@ -99,7 +95,7 @@ internal TimeSpan Timeout #region Constructors internal NpgsqlReadBuffer( - NpgsqlConnector connector, + NpgsqlConnector? connector, Stream stream, Socket? 
socket, int size, @@ -107,14 +103,12 @@ internal NpgsqlReadBuffer( Encoding relaxedTextEncoding, bool usePool = false) { - if (size < MinimumSize) - { - throw new ArgumentOutOfRangeException(nameof(size), size, "Buffer size must be at least " + MinimumSize); - } + ArgumentOutOfRangeException.ThrowIfLessThan(size, MinimumSize); - Connector = connector; + Connector = connector!; // TODO: Clean this up Underlying = stream; _underlyingSocket = socket; + _metricsReporter = connector?.DataSource.MetricsReporter; Cts = new ResettableCancellationTokenSource(); Buffer = usePool ? ArrayPool.Shared.Rent(size) : new byte[size]; Size = Buffer.Length; @@ -122,29 +116,158 @@ internal NpgsqlReadBuffer( TextEncoding = textEncoding; RelaxedTextEncoding = relaxedTextEncoding; + PgReader = new PgReader(this); } #endregion #region I/O - internal void Ensure(int count) => Ensure(count, false).GetAwaiter().GetResult(); + // Used for testing. + internal void AddBytesToRead(int count) + { + ArgumentOutOfRangeException.ThrowIfNegative(count); + ArgumentOutOfRangeException.ThrowIfGreaterThan(FilledBytes + count, Size, nameof(count)); + FilledBytes += count; + ReadBytesLeft += count; + _bufferEndPosition = unchecked(_bufferEndPosition + count); + } + + public void Ensure(int count) + => Ensure(count, async: false, readingNotifications: false).GetAwaiter().GetResult(); - public Task Ensure(int count, bool async) + public ValueTask Ensure(int count, bool async) => Ensure(count, async, readingNotifications: false); - public Task EnsureAsync(int count) + public ValueTask EnsureAsync(int count) => Ensure(count, async: true, readingNotifications: false); + // Can't share due to Span vs Memory difference (can't make a memory out of a span). 
+ int ReadWithTimeout(Span buffer) + { + while (true) + { + try + { + var read = Underlying.Read(buffer); + _bufferEndPosition = unchecked(_bufferEndPosition + read); + NpgsqlEventSource.Log.BytesRead(read); + return read; + } + catch (Exception ex) + { + var connector = Connector; + if (ex is IOException { InnerException: SocketException { SocketErrorCode: SocketError.TimedOut } }) + { + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector is { AttemptPostgresCancellation: true, PostgresCancellationPerformed: false } + && connector.PerformPostgresCancellation()) + { + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. + var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) + { + if (cancellationTimeout > 0) + Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + + continue; + } + } + + // If we're here, the PostgreSQL cancellation either failed or skipped entirely. + // Break the connection, bubbling up the correct exception type (cancellation or timeout) + throw connector.Break(CreateCancelException(connector)); + } + + throw connector.Break(new NpgsqlException("Exception while reading from stream", ex)); + } + } + } + + async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken cancellationToken) + { + var finalCt = Timeout != InfiniteTimeSpan + ? 
Cts.Start(cancellationToken) + : Cts.Reset(); + + while (true) + { + try + { + var read = await Underlying.ReadAsync(buffer, finalCt).ConfigureAwait(false); + _bufferEndPosition = unchecked(_bufferEndPosition + read); + Cts.Stop(); + NpgsqlEventSource.Log.BytesRead(read); + return read; + } + catch (Exception ex) + { + var connector = Connector; + Cts.Stop(); + switch (ex) + { + // Read timeout + case OperationCanceledException: + // Note that mono throws SocketException with the wrong error (see #1330) + case IOException e when (e.InnerException as SocketException)?.SocketErrorCode == + (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): + { + Debug.Assert(ex is OperationCanceledException); + + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector is { AttemptPostgresCancellation: true, PostgresCancellationPerformed: false } && + connector.PerformPostgresCancellation()) + { + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. + var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) + { + if (cancellationTimeout > 0) + Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + + finalCt = Cts.Start(cancellationToken); + continue; + } + } + + // If we're here, the PostgreSQL cancellation either failed or skipped entirely. + // Break the connection, bubbling up the correct exception type (cancellation or timeout) + throw connector.Break(CreateCancelException(connector)); + } + default: + throw connector.Break(new NpgsqlException("Exception while reading from stream", ex)); + } + } + } + } + + static Exception CreateCancelException(NpgsqlConnector connector) + => !connector.UserCancellationRequested + ? 
NpgsqlTimeoutException() + : connector.PostgresCancellationPerformed + ? new OperationCanceledException("Query was cancelled", TimeoutException(), connector.UserCancellationToken) + : new OperationCanceledException("Query was cancelled", connector.UserCancellationToken); + + static Exception NpgsqlTimeoutException() => new NpgsqlException("Exception while reading from stream", TimeoutException()); + + static Exception TimeoutException() => new TimeoutException("Timeout during reading attempt"); + /// /// Ensures that bytes are available in the buffer, and if /// not, reads from the socket until enough is available. /// - internal Task Ensure(int count, bool async, bool readingNotifications) + internal ValueTask Ensure(int count, bool async, bool readingNotifications) { - return count <= ReadBytesLeft ? Task.CompletedTask : EnsureLong(this, count, async, readingNotifications); + return count <= ReadBytesLeft ? new() : EnsureLong(this, count, async, readingNotifications); - static async Task EnsureLong( + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + static async ValueTask EnsureLong( NpgsqlReadBuffer buffer, int count, bool async, @@ -154,18 +277,17 @@ static async Task EnsureLong( Debug.Assert(count > buffer.ReadBytesLeft); count -= buffer.ReadBytesLeft; - if (buffer.ReadPosition == buffer.FilledBytes) + if (buffer.ReadBytesLeft == 0) { - buffer.Clear(); + buffer.ResetPosition(); } else if (count > buffer.Size - buffer.FilledBytes) { Array.Copy(buffer.Buffer, buffer.ReadPosition, buffer.Buffer, 0, buffer.ReadBytesLeft); buffer.FilledBytes = buffer.ReadBytesLeft; - buffer.ReadPosition = 0; } - var finalCt = async && buffer.Timeout != TimeSpan.Zero + var finalCt = async && buffer.Timeout != InfiniteTimeSpan ? buffer.Cts.Start() : buffer.Cts.Reset(); @@ -176,13 +298,15 @@ static async Task EnsureLong( { var toRead = buffer.Size - buffer.FilledBytes; var read = async - ? 
await buffer.Underlying.ReadAsync(buffer.Buffer.AsMemory(buffer.FilledBytes, toRead), finalCt) + ? await buffer.Underlying.ReadAsync(buffer.Buffer.AsMemory(buffer.FilledBytes, toRead), finalCt).ConfigureAwait(false) : buffer.Underlying.Read(buffer.Buffer, buffer.FilledBytes, toRead); if (read == 0) throw new EndOfStreamException(); count -= read; buffer.FilledBytes += read; + buffer.ReadBytesLeft += read; + buffer._bufferEndPosition = unchecked(buffer._bufferEndPosition + read); totalRead += read; // Most of the time, it should be fine to reset cancellation token source, so we can use it again @@ -213,42 +337,30 @@ static async Task EnsureLong( { Debug.Assert(e is OperationCanceledException ? async : !async); - var isStreamBroken = false; -#if NETSTANDARD2_0 - // SslStream on .NET Framework treats any IOException (including timeouts) as fatal and may - // return garbage if reused. To prevent this, we flow down and break the connection immediately. - // See #4305. - isStreamBroken = connector.IsSecure && e is IOException; -#endif - - if (!isStreamBroken) + // When reading notifications (Wait), just throw TimeoutException or + // OperationCanceledException immediately. + // Nothing to cancel, and no breaking of the connection. + if (readingNotifications) + throw CreateException(connector); + + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector is { AttemptPostgresCancellation: true, PostgresCancellationPerformed: false } && + connector.PerformPostgresCancellation()) { - // When reading notifications (Wait), just throw TimeoutException or - // OperationCanceledException immediately. - // Nothing to cancel, and no breaking of the connection. 
- if (readingNotifications) - throw CreateException(connector); - - // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. - // TODO: As an optimization, we can still attempt to send a cancellation request, but after - // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation()) + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. + var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) { - // Note that if the cancellation timeout is negative, we flow down and break the - // connection immediately. - var cancellationTimeout = connector.Settings.CancellationTimeout; - if (cancellationTimeout >= 0) - { - if (cancellationTimeout > 0) - buffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - - if (async) - finalCt = buffer.Cts.Start(); - - continue; - } + if (cancellationTimeout > 0) + buffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + + if (async) + finalCt = buffer.Cts.Start(); + + continue; } } @@ -272,6 +384,7 @@ static Exception CreateException(NpgsqlConnector connector) buffer.Cts.Stop(); NpgsqlEventSource.Log.BytesRead(totalRead); + buffer._metricsReporter?.ReportBytesRead(totalRead); static Exception NpgsqlTimeoutException() => new NpgsqlException("Exception while reading from stream", TimeoutException()); @@ -279,7 +392,7 @@ static Exception CreateException(NpgsqlConnector connector) } } - internal Task ReadMore(bool async) => Ensure(ReadBytesLeft + 1, async); + internal ValueTask ReadMore(bool async) => Ensure(ReadBytesLeft + 1, async); internal NpgsqlReadBuffer AllocateOversize(int count) { @@ -288,23 +401,44 @@ internal NpgsqlReadBuffer AllocateOversize(int count) if (_underlyingSocket != null) tempBuf.Timeout = Timeout; CopyTo(tempBuf); - Clear(); + ResetPosition(); return tempBuf; } 
/// - /// Does not perform any I/O - assuming that the bytes to be skipped are in the memory buffer. + /// Skip a given number of bytes. /// - internal void Skip(long len) + internal void Skip(int len, bool allowIO) + { + Debug.Assert(len >= 0); + + if (allowIO && len > ReadBytesLeft) + { + len -= ReadBytesLeft; + while (len > Size) + { + ResetPosition(); + Ensure(Size); + len -= Size; + } + ResetPosition(); + Ensure(len); + } + + Debug.Assert(ReadBytesLeft >= len); + ReadBytesLeft -= len; + } + + internal void Skip(int len) { Debug.Assert(ReadBytesLeft >= len); - ReadPosition += (int)len; + ReadBytesLeft -= len; } /// /// Skip a given number of bytes. /// - public async Task Skip(long len, bool async) + public async Task Skip(bool async, int len) { Debug.Assert(len >= 0); @@ -313,15 +447,15 @@ public async Task Skip(long len, bool async) len -= ReadBytesLeft; while (len > Size) { - Clear(); - await Ensure(Size, async); + ResetPosition(); + await Ensure(Size, async).ConfigureAwait(false); len -= Size; } - Clear(); - await Ensure((int)len, async); + ResetPosition(); + await Ensure(len, async).ConfigureAwait(false); } - ReadPosition += (int)len; + ReadBytesLeft -= len; } #endregion @@ -329,159 +463,132 @@ public async Task Skip(long len, bool async) #region Read Simple [MethodImpl(MethodImplOptions.AggressiveInlining)] - public sbyte ReadSByte() => Read(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public byte ReadByte() => Read(); + public byte ReadByte() + { + CheckBounds(sizeof(byte)); + var result = Buffer[ReadPosition]; + ReadBytesLeft -= sizeof(byte); + return result; + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public short ReadInt16() - => ReadInt16(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public short ReadInt16(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? 
result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(sizeof(short)); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(short); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ushort ReadUInt16() - => ReadUInt16(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public ushort ReadUInt16(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(sizeof(ushort)); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(ushort); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public int ReadInt32() - => ReadInt32(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int ReadInt32(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(sizeof(int)); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(int); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public uint ReadUInt32() - => ReadUInt32(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public uint ReadUInt32(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(sizeof(uint)); + var result = BitConverter.IsLittleEndian + ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(uint); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public long ReadInt64() - => ReadInt64(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long ReadInt64(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(sizeof(long)); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(long); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ulong ReadUInt64() - => ReadUInt64(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public ulong ReadUInt64(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(sizeof(ulong)); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(ulong); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public float ReadSingle() - => ReadSingle(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public float ReadSingle(bool littleEndian) { - var result = ReadInt32(littleEndian); - return Unsafe.As(ref result); + CheckBounds(sizeof(float)); + var result = BitConverter.IsLittleEndian + ? 
BitConverter.Int32BitsToSingle(BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition]))) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(float); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public double ReadDouble() - => ReadDouble(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public double ReadDouble(bool littleEndian) { - var result = ReadInt64(littleEndian); - return Unsafe.As(ref result); + CheckBounds(sizeof(double)); + var result = BitConverter.IsLittleEndian + ? BitConverter.Int64BitsToDouble(BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition]))) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadBytesLeft -= sizeof(double); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - T Read() + void CheckBounds(int count) { - if (Unsafe.SizeOf() > ReadBytesLeft) - ThrowNotSpaceLeft(); - - var result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += Unsafe.SizeOf(); - return result; + if (count > ReadBytesLeft) + ThrowHelper.ThrowInvalidOperationException("There is not enough data left in the buffer."); } - [MethodImpl(MethodImplOptions.NoInlining)] - static void ThrowNotSpaceLeft() - => throw new InvalidOperationException("There is not enough space left in the buffer."); - public string ReadString(int byteLen) { - Debug.Assert(byteLen <= ReadBytesLeft); + CheckBounds(byteLen); var result = TextEncoding.GetString(Buffer, ReadPosition, byteLen); - ReadPosition += byteLen; - return result; - } - - public char[] ReadChars(int byteLen) - { - Debug.Assert(byteLen <= ReadBytesLeft); - var result = TextEncoding.GetChars(Buffer, ReadPosition, byteLen); - ReadPosition += byteLen; + ReadBytesLeft -= byteLen; return result; } public void ReadBytes(Span output) { - Debug.Assert(output.Length <= ReadBytesLeft); + CheckBounds(output.Length); new Span(Buffer, ReadPosition, output.Length).CopyTo(output); - 
ReadPosition += output.Length; + ReadBytesLeft -= output.Length; } public void ReadBytes(byte[] output, int outputOffset, int len) => ReadBytes(new Span(output, outputOffset, len)); - public ReadOnlySpan ReadSpan(int len) - { - Debug.Assert(len <= ReadBytesLeft); - var span = new ReadOnlySpan(Buffer, ReadPosition, len); - ReadPosition += len; - return span; - } - public ReadOnlyMemory ReadMemory(int len) { - Debug.Assert(len <= ReadBytesLeft); + CheckBounds(len); var memory = new ReadOnlyMemory(Buffer, ReadPosition, len); - ReadPosition += len; + ReadBytesLeft -= len; return memory; } @@ -489,26 +596,31 @@ public ReadOnlyMemory ReadMemory(int len) #region Read Complex - public int Read(Span output) + public int Read(bool commandScoped, Span output) { var readFromBuffer = Math.Min(ReadBytesLeft, output.Length); if (readFromBuffer > 0) { - new Span(Buffer, ReadPosition, readFromBuffer).CopyTo(output); - ReadPosition += readFromBuffer; + Buffer.AsSpan(ReadPosition, readFromBuffer).CopyTo(output); + ReadBytesLeft -= readFromBuffer; return readFromBuffer; } - if (output.Length == 0) - return 0; + // Only reset if we'll be able to read data, this is to support zero-byte reads. 
+ if (output.Length > 0) + { + Debug.Assert(ReadBytesLeft == 0); + ResetPosition(); + } + + if (commandScoped) + return ReadWithTimeout(output); - Debug.Assert(ReadBytesLeft == 0); - Clear(); try { var read = Underlying.Read(output); - if (read == 0) - throw new EndOfStreamException(); + _bufferEndPosition = unchecked(_bufferEndPosition + read); + NpgsqlEventSource.Log.BytesRead(read); return read; } catch (Exception e) @@ -517,30 +629,35 @@ public int Read(Span output) } } - public ValueTask ReadAsync(Memory output, CancellationToken cancellationToken = default) + public ValueTask ReadAsync(bool commandScoped, Memory output, CancellationToken cancellationToken = default) { - if (output.Length == 0) - return new ValueTask(0); - var readFromBuffer = Math.Min(ReadBytesLeft, output.Length); if (readFromBuffer > 0) { - new Span(Buffer, ReadPosition, readFromBuffer).CopyTo(output.Span); - ReadPosition += readFromBuffer; + Buffer.AsSpan(ReadPosition, readFromBuffer).CopyTo(output.Span); + ReadBytesLeft -= readFromBuffer; return new ValueTask(readFromBuffer); } - return ReadAsyncLong(this, output, cancellationToken); + return ReadAsyncLong(this, commandScoped, output, cancellationToken); - static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, Memory output, CancellationToken cancellationToken) + static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, bool commandScoped, Memory output, CancellationToken cancellationToken) { - Debug.Assert(buffer.ReadBytesLeft == 0); - buffer.Clear(); + // Only reset if we'll be able to read data, this is to support zero-byte reads. 
+ if (output.Length > 0) + { + Debug.Assert(buffer.ReadBytesLeft == 0); + buffer.ResetPosition(); + } + + if (commandScoped) + return await buffer.ReadWithTimeoutAsync(output, cancellationToken).ConfigureAwait(false); + try { - var read = await buffer.Underlying.ReadAsync(output, cancellationToken); - if (read == 0) - throw new EndOfStreamException(); + var read = await buffer.Underlying.ReadAsync(output, cancellationToken).ConfigureAwait(false); + buffer._bufferEndPosition = unchecked(buffer._bufferEndPosition + read); + NpgsqlEventSource.Log.BytesRead(read); return read; } catch (Exception e) @@ -550,22 +667,13 @@ static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, Memory } } - public Stream GetStream(int len, bool canSeek) - { - if (_columnStream == null) - _columnStream = new ColumnStream(Connector); - - _columnStream.Init(len, canSeek); - return _columnStream; - } - - public TextReader GetPreparedTextReader(string str, Stream stream) + ColumnStream? _lastStream; + public ColumnStream CreateStream(int len, bool canSeek, bool consumeOnDispose = true) { - if (_preparedTextReader is not { IsDisposed: true }) - _preparedTextReader = new PreparedTextReader(); - - _preparedTextReader.Init(str, (ColumnStream)stream); - return _preparedTextReader; + if (_lastStream is not { IsDisposed: true }) + _lastStream = new ColumnStream(Connector); + _lastStream.Init(len, canSeek, Connector.Settings.ReplicationMode == ReplicationMode.Off, consumeOnDispose); + return _lastStream; } /// @@ -590,13 +698,13 @@ public ValueTask ReadNullTerminatedString(bool async, CancellationToken /// Seeks the first null terminator (\0) and returns the string up to it. Reads additional data from the network if a null /// terminator isn't found in the buffered data. 
/// - ValueTask ReadNullTerminatedString(Encoding encoding, bool async, CancellationToken cancellationToken = default) + public ValueTask ReadNullTerminatedString(Encoding encoding, bool async, CancellationToken cancellationToken = default) { - var index = Buffer.AsSpan(ReadPosition, FilledBytes - ReadPosition).IndexOf((byte)0); + var index = Span.IndexOf((byte)0); if (index >= 0) { var result = new ValueTask(encoding.GetString(Buffer, ReadPosition, index)); - ReadPosition += index + 1; + ReadBytesLeft -= index + 1; return result; } @@ -604,7 +712,7 @@ ValueTask ReadNullTerminatedString(Encoding encoding, bool async, Cancel async ValueTask ReadLong(Encoding encoding, bool async) { - var chunkSize = FilledBytes - ReadPosition; + var chunkSize = ReadBytesLeft; var tempBuf = ArrayPool.Shared.Rent(chunkSize + 1024); try @@ -612,11 +720,11 @@ async ValueTask ReadLong(Encoding encoding, bool async) bool foundTerminator; var byteLen = chunkSize; Array.Copy(Buffer, ReadPosition, tempBuf, 0, chunkSize); - ReadPosition += chunkSize; + ReadBytesLeft -= chunkSize; do { - await ReadMore(async); + await ReadMore(async).ConfigureAwait(false); Debug.Assert(ReadPosition == 0); foundTerminator = false; @@ -645,7 +753,7 @@ async ValueTask ReadLong(Encoding encoding, bool async) ReadPosition = i; } while (!foundTerminator); - ReadPosition++; + ReadBytesLeft--; return encoding.GetString(tempBuf, 0, byteLen); } finally @@ -657,10 +765,10 @@ async ValueTask ReadLong(Encoding encoding, bool async) public ReadOnlySpan GetNullTerminatedBytes() { - var i = Buffer.AsSpan(ReadPosition).IndexOf((byte)0); + var i = Span.IndexOf((byte)0); Debug.Assert(i >= 0); var result = new ReadOnlySpan(Buffer, ReadPosition, i); - ReadPosition += i + 1; + ReadBytesLeft -= i + 1; return result; } @@ -684,17 +792,21 @@ public void Dispose() #region Misc - internal void Clear() + void ResetPosition() { - ReadPosition = 0; + ReadBytesLeft = 0; FilledBytes = 0; } + internal void RebaseBufferEndPosition() => 
_bufferEndPosition = ReadBytesLeft; + internal void CopyTo(NpgsqlReadBuffer other) { Debug.Assert(other.Size - other.FilledBytes >= ReadBytesLeft); Array.Copy(Buffer, ReadPosition, other.Buffer, other.FilledBytes, ReadBytesLeft); other.FilledBytes += ReadBytesLeft; + other.ReadBytesLeft += ReadBytesLeft; + other._bufferEndPosition = unchecked(other._bufferEndPosition + ReadBytesLeft); } #endregion diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs deleted file mode 100644 index 46b5c8e41a..0000000000 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs +++ /dev/null @@ -1,122 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql.Internal; - -public sealed partial class NpgsqlWriteBuffer -{ - sealed class ParameterStream : Stream - { - readonly NpgsqlWriteBuffer _buf; - bool _disposed; - - internal ParameterStream(NpgsqlWriteBuffer buf) - => _buf = buf; - - internal void Init() - => _disposed = false; - - public override bool CanRead => false; - - public override bool CanWrite => true; - - public override bool CanSeek => false; - - public override long Length => throw new NotSupportedException(); - - public override void SetLength(long value) - => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - - public override long Seek(long offset, SeekOrigin origin) - => throw new NotSupportedException(); - - public override void Flush() - => CheckDisposed(); - - public override Task FlushAsync(CancellationToken cancellationToken = default) - { - CheckDisposed(); - return cancellationToken.IsCancellationRequested - ? 
Task.FromCanceled(cancellationToken) : Task.CompletedTask; - } - - public override int Read(byte[] buffer, int offset, int count) - => throw new NotSupportedException(); - - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) - => Write(buffer, offset, count, false); - - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Write(buffer, offset, count, true, cancellationToken); - } - - Task Write(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - CheckDisposed(); - - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentNullException(nameof(offset)); - if (count < 0) - throw new ArgumentNullException(nameof(count)); - if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - - while (count > 0) - { - var left = _buf.WriteSpaceLeft; - if (left == 0) - return WriteLong(buffer, offset, count, async, cancellationToken); - - var slice = Math.Min(count, left); - _buf.WriteBytes(buffer, offset, slice); - offset += slice; - count -= slice; - } - - return Task.CompletedTask; - } - - async Task WriteLong(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - while (count > 0) - { - var left = _buf.WriteSpaceLeft; - if (left == 0) - { - await _buf.Flush(async, cancellationToken); - continue; - } - var slice = Math.Min(count, left); - _buf.WriteBytes(buffer, offset, slice); - 
offset += slice; - count -= slice; - } - } - - void CheckDisposed() - { - if (_disposed) - throw new ObjectDisposedException(null); - } - - protected override void Dispose(bool disposing) - => _disposed = true; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 1a89cff985..6db8974e1e 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -1,9 +1,7 @@ -using System; -using System.Buffers; +using System; using System.Buffers.Binary; using System.Diagnostics; using System.IO; -using System.Linq; using System.Net.Sockets; using System.Runtime.CompilerServices; using System.Text; @@ -12,24 +10,30 @@ using Npgsql.Util; using static System.Threading.Timeout; -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member namespace Npgsql.Internal; /// /// A buffer used by Npgsql to write data to the socket efficiently. /// Provides methods which encode different values types and tracks the current position. /// -public sealed partial class NpgsqlWriteBuffer : IDisposable +sealed class NpgsqlWriteBuffer : IDisposable { #region Fields and Properties + internal static readonly UTF8Encoding UTF8Encoding = new ThrowingUTF8Encoding(); + internal static readonly UTF8Encoding RelaxedUTF8Encoding = Encoding.UTF8 as UTF8Encoding ?? new(false, throwOnInvalidBytes: false); + + sealed class ThrowingUTF8Encoding() : UTF8Encoding(false, throwOnInvalidBytes: true); + internal readonly NpgsqlConnector Connector; internal Stream Underlying { private get; set; } readonly Socket? _underlyingSocket; + internal bool MessageLengthValidation { get; set; } = true; readonly ResettableCancellationTokenSource _timeoutCts; + readonly MetricsReporter? 
_metricsReporter; /// /// Timeout for sync and async writes @@ -67,14 +71,22 @@ internal TimeSpan Timeout public int WriteSpaceLeft => Size - WritePosition; + // (Re)init to make sure we'll refetch from the write buffer. + internal PgWriter GetWriter(NpgsqlDatabaseInfo typeCatalog, FlushMode flushMode = FlushMode.None) + => _pgWriter.Init(typeCatalog, flushMode); + internal readonly byte[] Buffer; readonly Encoder _textEncoder; internal int WritePosition; - ParameterStream? _parameterStream; + int _messageBytesFlushed; + int? _messageLength; bool _disposed; + readonly PgWriter _pgWriter; + + Span Span => Buffer.AsSpan(WritePosition, WriteSpaceLeft); /// /// The minimum buffer size possible. @@ -87,24 +99,25 @@ internal TimeSpan Timeout #region Constructors internal NpgsqlWriteBuffer( - NpgsqlConnector connector, + NpgsqlConnector? connector, Stream stream, Socket? socket, int size, Encoding textEncoding) { - if (size < MinimumSize) - throw new ArgumentOutOfRangeException(nameof(size), size, "Buffer size must be at least " + MinimumSize); + ArgumentOutOfRangeException.ThrowIfLessThan(size, MinimumSize); - Connector = connector; + Connector = connector!; // TODO: Clean this up; only null when used from PregeneratedMessages, where we don't care. Underlying = stream; _underlyingSocket = socket; + _metricsReporter = connector?.DataSource.MetricsReporter!; _timeoutCts = new ResettableCancellationTokenSource(); Buffer = new byte[size]; Size = size; TextEncoding = textEncoding; _textEncoder = TextEncoding.GetEncoder(); + _pgWriter = new PgWriter(new NpgsqlBufferWriter(this)); } #endregion @@ -126,6 +139,8 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul WritePosition = pos; } else if (WritePosition == 0) return; + else + AdvanceMessageBytesFlushed(WritePosition); var finalCt = async && Timeout > TimeSpan.Zero ? 
_timeoutCts.Start(cancellationToken) @@ -135,9 +150,9 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul { if (async) { - await Underlying.WriteAsync(Buffer, 0, WritePosition, finalCt); - await Underlying.FlushAsync(finalCt); - if (Timeout > TimeSpan.Zero) + await Underlying.WriteAsync(Buffer, 0, WritePosition, finalCt).ConfigureAwait(false); + await Underlying.FlushAsync(finalCt).ConfigureAwait(false); + if (Timeout > TimeSpan.Zero) _timeoutCts.Stop(); } else @@ -146,31 +161,29 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul Underlying.Flush(); } } - catch (Exception e) + catch (Exception ex) { // Stopping twice (in case the previous Stop() call succeeded) doesn't hurt. // Not stopping will cause an assertion failure in debug mode when we call Start() the next time. // We can't stop in a finally block because Connector.Break() will dispose the buffer and the contained // _timeoutCts _timeoutCts.Stop(); - switch (e) + switch (ex) { // User requested the cancellation - case OperationCanceledException _ when (cancellationToken.IsCancellationRequested): - throw Connector.Break(e); + case OperationCanceledException when cancellationToken.IsCancellationRequested: + throw Connector.Break(ex); // Read timeout - case OperationCanceledException _: - // Note that mono throws SocketException with the wrong error (see #1330) - case IOException _ when (e.InnerException as SocketException)?.SocketErrorCode == - (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): - Debug.Assert(e is OperationCanceledException ? async : !async); + case OperationCanceledException: + case IOException { InnerException: SocketException { SocketErrorCode: SocketError.TimedOut } }: + Debug.Assert(ex is OperationCanceledException ? 
async : !async); throw Connector.Break(new NpgsqlException("Exception while writing to stream", new TimeoutException("Timeout during writing attempt"))); } - throw Connector.Break(new NpgsqlException("Exception while writing to stream", e)); + throw Connector.Break(new NpgsqlException("Exception while writing to stream", ex)); } NpgsqlEventSource.Log.BytesWritten(WritePosition); - //NpgsqlEventSource.Log.RequestFailed(); + _metricsReporter?.ReportBytesWritten(WritePosition); WritePosition = 0; if (_copyMode) @@ -194,15 +207,19 @@ internal void DirectWrite(ReadOnlySpan buffer) Debug.Assert(WritePosition == 5); WritePosition = 1; - WriteInt32(buffer.Length + 4); + WriteInt32(checked(buffer.Length + 4)); WritePosition = 5; _copyMode = false; + StartMessage(5); Flush(); _copyMode = true; WriteCopyDataHeader(); // And ready the buffer after the direct write completes } else + { Debug.Assert(WritePosition == 0); + AdvanceMessageBytesFlushed(buffer.Length); + } try { @@ -216,7 +233,7 @@ internal void DirectWrite(ReadOnlySpan buffer) internal async Task DirectWrite(ReadOnlyMemory memory, bool async, CancellationToken cancellationToken = default) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); if (_copyMode) { @@ -225,20 +242,24 @@ internal async Task DirectWrite(ReadOnlyMemory memory, bool async, Cancell Debug.Assert(WritePosition == 5); WritePosition = 1; - WriteInt32(memory.Length + 4); + WriteInt32(checked(memory.Length + 4)); WritePosition = 5; _copyMode = false; - await Flush(async, cancellationToken); + StartMessage(5); + await Flush(async, cancellationToken).ConfigureAwait(false); _copyMode = true; WriteCopyDataHeader(); // And ready the buffer after the direct write completes } else + { Debug.Assert(WritePosition == 0); + AdvanceMessageBytesFlushed(memory.Length); + } try { if (async) - await Underlying.WriteAsync(memory, cancellationToken); + await Underlying.WriteAsync(memory, 
cancellationToken).ConfigureAwait(false); else Underlying.Write(memory.Span); } @@ -252,186 +273,109 @@ internal async Task DirectWrite(ReadOnlyMemory memory, bool async, Cancell #region Write Simple - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteSByte(sbyte value) => Write(value); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteByte(byte value) => Write(value); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void WriteInt16(int value) - => WriteInt16((short)value, false); + public void WriteByte(byte value) + { + CheckBounds(); + Buffer[WritePosition] = value; + WritePosition += sizeof(byte); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteInt16(short value) - => WriteInt16(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteInt16(short value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(short); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteUInt16(ushort value) - => WriteUInt16(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt16(ushort value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? 
BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(ushort); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteInt32(int value) - => WriteInt32(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteInt32(int value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(int); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteUInt32(uint value) - => WriteUInt32(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt32(uint value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(uint); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteInt64(long value) - => WriteInt64(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteInt64(long value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt64(ulong value) - => WriteUInt64(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt64(ulong value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? 
value : BinaryPrimitives.ReverseEndianness(value)); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteSingle(float value) - => WriteSingle(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteSingle(float value, bool littleEndian) - => WriteInt32(Unsafe.As(ref value), littleEndian); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteDouble(double value) - => WriteDouble(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteDouble(double value, bool littleEndian) - => WriteInt64(Unsafe.As(ref value), littleEndian); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(long); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Write(T value) + [Conditional("DEBUG")] + unsafe void CheckBounds() where T : unmanaged { - if (Unsafe.SizeOf() > WriteSpaceLeft) + if (sizeof(T) > WriteSpaceLeft) ThrowNotSpaceLeft(); - - Unsafe.WriteUnaligned(ref Buffer[WritePosition], value); - WritePosition += Unsafe.SizeOf(); } - [MethodImpl(MethodImplOptions.NoInlining)] static void ThrowNotSpaceLeft() - => throw new InvalidOperationException("There is not enough space left in the buffer."); + => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); public Task WriteString(string s, int byteLen, bool async, CancellationToken cancellationToken = default) - => WriteString(s, s.Length, byteLen, async, cancellationToken); - - public Task WriteString(string s, int charLen, int byteLen, bool async, CancellationToken cancellationToken = default) - { - if (byteLen <= WriteSpaceLeft) - { - WriteString(s, charLen); - return Task.CompletedTask; - } - return WriteStringLong(this, async, s, charLen, byteLen, cancellationToken); - - static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s, 
int charLen, int byteLen, CancellationToken cancellationToken) - { - Debug.Assert(byteLen > buffer.WriteSpaceLeft); - if (byteLen <= buffer.Size) - { - // String can fit entirely in an empty buffer. Flush and retry rather than - // going into the partial writing flow below (which requires ToCharArray()) - await buffer.Flush(async, cancellationToken); - buffer.WriteString(s, charLen); - } - else - { - var charPos = 0; - while (true) - { - buffer.WriteStringChunked(s, charPos, charLen - charPos, true, out var charsUsed, out var completed); - if (completed) - break; - await buffer.Flush(async, cancellationToken); - charPos += charsUsed; - } - } - } - } - - internal Task WriteChars(char[] chars, int offset, int charLen, int byteLen, bool async, CancellationToken cancellationToken = default) { if (byteLen <= WriteSpaceLeft) { - WriteChars(chars, offset, charLen); + WriteString(s); return Task.CompletedTask; } - return WriteCharsLong(this, async, chars, offset, charLen, byteLen, cancellationToken); + return WriteStringLong(this, async, s, byteLen, cancellationToken); - static async Task WriteCharsLong(NpgsqlWriteBuffer buffer, bool async, char[] chars, int offset, int charLen, int byteLen, CancellationToken cancellationToken) + static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s, int byteLen, CancellationToken cancellationToken) { Debug.Assert(byteLen > buffer.WriteSpaceLeft); if (byteLen <= buffer.Size) { // String can fit entirely in an empty buffer. 
Flush and retry rather than - // going into the partial writing flow below (which requires ToCharArray()) - await buffer.Flush(async, cancellationToken); - buffer.WriteChars(chars, offset, charLen); + // going into the partial writing flow below + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); + buffer.WriteString(s); } else { - var charPos = 0; + var encoder = buffer._textEncoder; + encoder.Reset(); + var data = s.AsMemory(); + var minBufferSize = buffer.TextEncoding.GetMaxByteCount(1); - while (true) + bool completed; + do { - buffer.WriteStringChunked(chars, charPos + offset, charLen - charPos, true, out var charsUsed, out var completed); - if (completed) - break; - await buffer.Flush(async, cancellationToken); - charPos += charsUsed; - } + if (buffer.WriteSpaceLeft < minBufferSize) + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); + encoder.Convert(data.Span, buffer.Span, flush: true, out var charsUsed, out var bytesUsed, out completed); + data = data.Slice(charsUsed); + buffer.WritePosition += bytesUsed; + } while (!completed); } } } - public void WriteString(string s, int len = 0) + public void WriteString(string s) { Debug.Assert(TextEncoding.GetByteCount(s) <= WriteSpaceLeft); - WritePosition += TextEncoding.GetBytes(s, 0, len == 0 ? s.Length : len, Buffer, WritePosition); - } - - internal void WriteChars(char[] chars, int offset, int len) - { - var charCount = len == 0 ? 
chars.Length : len; - Debug.Assert(TextEncoding.GetByteCount(chars, 0, charCount) <= WriteSpaceLeft); - WritePosition += TextEncoding.GetBytes(chars, offset, charCount, Buffer, WritePosition); - } - -#if !NETSTANDARD2_0 - internal void WriteChars(ReadOnlySpan chars) - { - Debug.Assert(TextEncoding.GetByteCount(chars) <= WriteSpaceLeft); - WritePosition += TextEncoding.GetBytes(chars, Buffer.AsSpan(WritePosition)); + WritePosition += TextEncoding.GetBytes(s, 0, s.Length, Buffer, WritePosition); } -#endif public void WriteBytes(ReadOnlySpan buf) { @@ -440,10 +384,15 @@ public void WriteBytes(ReadOnlySpan buf) WritePosition += buf.Length; } + public void WriteBytes(ReadOnlyMemory buf) + => WriteBytes(buf.Span); + + public void WriteBytes(byte[] buf) => WriteBytes(buf.AsSpan()); + public void WriteBytes(byte[] buf, int offset, int count) => WriteBytes(new ReadOnlySpan(buf, offset, count)); - public Task WriteBytesRaw(byte[] bytes, bool async, CancellationToken cancellationToken = default) + public Task WriteBytesRaw(ReadOnlyMemory bytes, bool async, CancellationToken cancellationToken = default) { if (bytes.Length <= WriteSpaceLeft) { @@ -452,13 +401,13 @@ public Task WriteBytesRaw(byte[] bytes, bool async, CancellationToken cancellati } return WriteBytesLong(this, async, bytes, cancellationToken); - static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, byte[] bytes, CancellationToken cancellationToken) + static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, ReadOnlyMemory bytes, CancellationToken cancellationToken) { if (bytes.Length <= buffer.Size) { // value can fit entirely in an empty buffer. 
Flush and retry rather than // going into the partial writing flow below - await buffer.Flush(async, cancellationToken); + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); buffer.WriteBytes(bytes); } else @@ -467,10 +416,10 @@ static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, byte[] by do { if (buffer.WriteSpaceLeft == 0) - await buffer.Flush(async, cancellationToken); + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); var writeLen = Math.Min(remaining, buffer.WriteSpaceLeft); var offset = bytes.Length - remaining; - buffer.WriteBytes(bytes, offset, writeLen); + buffer.WriteBytes(bytes.Slice(offset, writeLen)); remaining -= writeLen; } while (remaining > 0); @@ -478,86 +427,20 @@ static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, byte[] by } } - public async Task WriteStreamRaw(Stream stream, int count, bool async, CancellationToken cancellationToken = default) - { - while (count > 0) - { - if (WriteSpaceLeft == 0) - await Flush(async, cancellationToken); - try - { - var read = async - ? 
await stream.ReadAsync(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count), cancellationToken) - : stream.Read(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count)); - if (read == 0) - throw new EndOfStreamException(); - WritePosition += read; - count -= read; - } - catch (Exception e) - { - throw Connector.Break(new NpgsqlException("Exception while writing to stream", e)); - } - } - Debug.Assert(count == 0); - } - public void WriteNullTerminatedString(string s) { - Debug.Assert(s.All(c => c < 128), "Method only supports ASCII strings"); + AssertASCIIOnly(s); Debug.Assert(WriteSpaceLeft >= s.Length + 1); WritePosition += Encoding.ASCII.GetBytes(s, 0, s.Length, Buffer, WritePosition); WriteByte(0); } - #endregion - - #region Write Complex - - public Stream GetStream() - { - if (_parameterStream == null) - _parameterStream = new ParameterStream(this); - - _parameterStream.Init(); - return _parameterStream; - } - - internal void WriteStringChunked(char[] chars, int charIndex, int charCount, - bool flush, out int charsUsed, out bool completed) + public void WriteNullTerminatedString(byte[] s) { - if (WriteSpaceLeft < _textEncoder.GetByteCount(chars, charIndex, char.IsHighSurrogate(chars[charIndex]) ? 2 : 1, flush: false)) - { - charsUsed = 0; - completed = false; - return; - } - - _textEncoder.Convert(chars, charIndex, charCount, Buffer, WritePosition, WriteSpaceLeft, - flush, out charsUsed, out var bytesUsed, out completed); - WritePosition += bytesUsed; - } - - internal unsafe void WriteStringChunked(string s, int charIndex, int charCount, - bool flush, out int charsUsed, out bool completed) - { - int bytesUsed; - - fixed (char* sPtr = s) - fixed (byte* bufPtr = Buffer) - { - if (WriteSpaceLeft < _textEncoder.GetByteCount(sPtr + charIndex, char.IsHighSurrogate(*(sPtr + charIndex)) ? 
2 : 1, flush: false)) - { - charsUsed = 0; - completed = false; - return; - } - - _textEncoder.Convert(sPtr + charIndex, charCount, bufPtr + WritePosition, WriteSpaceLeft, - flush, out charsUsed, out bytesUsed, out completed); - } - - WritePosition += bytesUsed; + AssertASCIIOnly(s); + Debug.Assert(WriteSpaceLeft >= s.Length + 1); + WriteBytes(s); + WriteByte(0); } #endregion @@ -606,9 +489,50 @@ public void Dispose() #region Misc + internal void StartMessage(int messageLength) + { + if (!MessageLengthValidation) + return; + + if (_messageLength is not null && _messageBytesFlushed != _messageLength && WritePosition != -_messageBytesFlushed + _messageLength) + Throw(); + + // Add negative WritePosition to compensate for previous message(s) written without flushing. + _messageBytesFlushed = -WritePosition; + _messageLength = messageLength; + + void Throw() + { + throw Connector.Break(new OverflowException("Did not write the amount of bytes the message length specified")); + } + } + + void AdvanceMessageBytesFlushed(int count) + { + if (!MessageLengthValidation) + return; + + if (count < 0 || _messageLength is null || (long)_messageBytesFlushed + count > _messageLength) + Throw(); + + _messageBytesFlushed += count; + + void Throw() + { + ArgumentOutOfRangeException.ThrowIfNegative(count); + + if (_messageLength is null) + throw Connector.Break(new InvalidOperationException("No message was started")); + + if ((long)_messageBytesFlushed + count > _messageLength) + throw Connector.Break(new OverflowException("Tried to write more bytes than the message length specified")); + } + } + internal void Clear() { WritePosition = 0; + _messageLength = null; } /// @@ -622,5 +546,21 @@ internal byte[] GetContents() return buf; } + [Conditional("DEBUG")] + internal static void AssertASCIIOnly(string s) + { + foreach (var c in s) + if (c >= 128) + Debug.Fail("Method only supports ASCII strings"); + } + + [Conditional("DEBUG")] + internal static void AssertASCIIOnly(byte[] s) + { + 
foreach (var c in s) + if (c >= 128) + Debug.Fail("Method only supports ASCII strings"); + } + #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs new file mode 100644 index 0000000000..9a61cbb3f1 --- /dev/null +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -0,0 +1,38 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgBufferedConverter(bool customDbNullPredicate = false) : PgConverter(customDbNullPredicate) +{ + protected abstract T ReadCore(PgReader reader); + protected abstract void WriteCore(PgWriter writer, T value); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => throw new NotSupportedException(); + + public sealed override T Read(PgReader reader) => ReadCore(reader); + + public sealed override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => new(Read(reader)); + + internal sealed override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) + => new(Read(reader)!); + + public sealed override void Write(PgWriter writer, T value) => WriteCore(writer, value); + + public sealed override ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default) + { + Write(writer, value); + return new(); + } + + internal sealed override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + Write(writer, (T)value); + return new(); + } +} diff --git a/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs b/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs new file mode 100644 index 0000000000..9a2d854872 --- /dev/null +++ b/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs @@ -0,0 +1,84 @@ 
+using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +abstract class PgComposingTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgTypeId? _pgTypeId; + protected PgProviderTypeInfo EffectiveTypeInfo { get; } + readonly ConcurrentDictionary _concreteInfoCache = new(ReferenceEqualityComparer.Instance); + + protected PgComposingTypeInfoProvider(PgTypeId? pgTypeId, PgProviderTypeInfo effectiveTypeInfo) + { + ArgumentNullException.ThrowIfNull(effectiveTypeInfo); + if (pgTypeId is null && effectiveTypeInfo.PgTypeId is not null) + throw new ArgumentNullException(nameof(pgTypeId), $"Cannot be null if {nameof(effectiveTypeInfo)}.{nameof(PgTypeInfo.PgTypeId)} is not null."); + + _pgTypeId = pgTypeId; + EffectiveTypeInfo = effectiveTypeInfo; + } + + protected abstract PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId); + protected abstract PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId); + protected abstract PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType); + protected abstract PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + { + PgTypeId? effectiveTypeId = pgTypeId is { } id ? GetEffectiveTypeId(id) : null; + var concreteTypeInfo = EffectiveTypeInfo.GetDefault(effectiveTypeId); + var composingPgTypeId = _pgTypeId ?? GetPgTypeId(concreteTypeInfo.PgTypeId); + return GetOrAdd(concreteTypeInfo, composingPgTypeId); + } + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, T? value, ref object? writeState) + { + PgTypeId? effectiveTypeId = context.ExpectedPgTypeId is { } id ? 
GetEffectiveTypeId(id) : null; + var effectiveContext = context with { ExpectedPgTypeId = effectiveTypeId }; + if (GetEffectiveTypeInfo(effectiveContext, value, ref writeState) is { } effectiveTypeInfo) + return GetOrAdd(effectiveTypeInfo, context.ExpectedPgTypeId ?? _pgTypeId ?? GetPgTypeId(effectiveTypeInfo.PgTypeId)); + + return null; + } + + protected override PgConcreteTypeInfo? GetForFieldCore(Field field) + { + if (EffectiveTypeInfo.GetForField(field with { PgTypeId = GetEffectivePgTypeId(field.PgTypeId)}) is not { } concreteTypeInfo) + return null; + + var composingPgTypeId = _pgTypeId ?? GetPgTypeId(concreteTypeInfo.PgTypeId); + return GetOrAdd(concreteTypeInfo, composingPgTypeId); + } + + PgTypeId GetEffectiveTypeId(PgTypeId pgTypeId) + { + // If we have a _pgTypeId match we already know the effective id, and the constructor has verified it is non-null. + if (pgTypeId == _pgTypeId) + return EffectiveTypeInfo.PgTypeId.GetValueOrDefault(); + + // We have an undecided type info which is asked to resolve for a specific type id + // we'll unfortunately have to look up the effective id, this is rare though. 
+ return GetEffectivePgTypeId(pgTypeId); + } + + PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo concreteTypeInfo, PgTypeId pgTypeId) + { + (PgComposingTypeInfoProvider Instance, PgConcreteTypeInfo ConcreteTypeInfo, PgTypeId PgTypeId) + state = (this, concreteTypeInfo, pgTypeId); + return _concreteInfoCache.GetOrAdd( + concreteTypeInfo, + static (_, state) + => new(state.ConcreteTypeInfo.Options, + state.Instance.CreateConverter(state.ConcreteTypeInfo, out var requestedType), + state.PgTypeId, + requestedType: requestedType) + { + SupportsReading = state.ConcreteTypeInfo.SupportsReading, + SupportsWriting = state.ConcreteTypeInfo.SupportsWriting + }, + state); + } +} diff --git a/src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs b/src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs new file mode 100644 index 0000000000..a52c977838 --- /dev/null +++ b/src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs @@ -0,0 +1,105 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgConcreteTypeInfoProvider +{ + private protected PgConcreteTypeInfoProvider() { } + + /// + /// Gets the appropriate type info solely based on PgTypeId. + /// + public PgConcreteTypeInfo GetDefault(PgTypeId? pgTypeId) + { + var result = GetDefaultCore(pgTypeId); + if (pgTypeId is { } id && result.PgTypeId != id) + ThrowPgTypeIdMismatch(nameof(GetDefaultCore)); + return result; + } + + /// + /// Gets the appropriate type info based on the given field info. + /// + public PgConcreteTypeInfo? GetForField(Field field) + { + var result = GetForFieldCore(field); + if (result is not null && result.PgTypeId != field.PgTypeId) + ThrowPgTypeIdMismatch(nameof(GetForFieldCore)); + return result; + } + + /// + /// Gets the appropriate type info based on the given value and expected type id. + /// + public PgConcreteTypeInfo? 
GetForValueAsObject(ProviderValueContext context, object? value, ref object? writeState) + { + var result = GetForValueAsObjectCore(context, value, ref writeState); + if (context.ExpectedPgTypeId is { } id && result is not null && result.PgTypeId != id) + ThrowPgTypeIdMismatch(nameof(GetForValueAsObjectCore)); + return result; + } + + /// + /// Gets the default concrete type info for a given PgTypeId. + /// + /// + /// Implementations should not return new instances of the possible infos that can be returned, instead its expected these are cached once returned. + /// Composing providers depend on this to cache their own infos - wrapping the element info - with the cache key being the element info reference. + /// + protected abstract PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId); + + /// + /// Gets the concrete type info for a given field. + /// + /// + /// Implementations should not return new instances of the possible infos that can be returned, instead its expected these are cached once returned. + /// Composing providers depend on this to cache their own infos - wrapping the element info - with the cache key being the element info reference. + /// + protected virtual PgConcreteTypeInfo? GetForFieldCore(Field field) => null; + + internal abstract Type TypeToConvert { get; } + + private protected abstract PgConcreteTypeInfo? GetForValueAsObjectCore(ProviderValueContext context, object? value, ref object? writeState); + + private protected static void ThrowPgTypeIdMismatch(string methodName) + => throw new InvalidOperationException( + $"'{methodName}' incorrectly returned a different {nameof(PgTypeId)} in its concrete type info than the caller passed in."); +} + +public abstract class PgConcreteTypeInfoProvider : PgConcreteTypeInfoProvider +{ + /// + /// Gets the appropriate type info based on the given value and expected type id. + /// + public PgConcreteTypeInfo? GetForValue(ProviderValueContext context, T? value, ref object? 
writeState) + { + var result = GetForValueCore(context, value, ref writeState); + if (context.ExpectedPgTypeId is { } id && result is not null && result.PgTypeId != id) + ThrowPgTypeIdMismatch(nameof(GetForValueCore)); + return result; + } + + /// + /// Gets the concrete type info for a given value and expected type id. + /// + /// + /// Implementations should not return new instances of the possible infos that can be returned, instead its expected these are cached once returned. + /// Composing providers depend on this to cache their own infos - wrapping the element info - with the cache key being the element info reference. + /// + protected abstract PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, T? value, ref object? writeState); + + internal sealed override Type TypeToConvert => typeof(T); + + // If null was passed while it is not a valid value for T we directly return null. + // This allows concrete info to be produced by falling back to GetDefault afterwards. + private protected sealed override PgConcreteTypeInfo? GetForValueAsObjectCore(ProviderValueContext context, object? value, ref object? writeState) + => default(T) is null || value is not null ? GetForValueCore(context, (T?)value, ref writeState) : null; +} + +public readonly struct ProviderValueContext +{ + public PgTypeId? 
ExpectedPgTypeId { get; init; } +} diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs new file mode 100644 index 0000000000..ddbc21a9a3 --- /dev/null +++ b/src/Npgsql/Internal/PgConverter.cs @@ -0,0 +1,294 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.ComponentModel; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgConverter +{ + internal DbNullPredicate DbNullPredicateKind { get; } + public bool IsDbNullable => DbNullPredicateKind is not DbNullPredicate.None; + + private protected PgConverter(Type type, bool isNullDefaultValue, bool customDbNullPredicate = false) + { + TypeToConvert = type; + DbNullPredicateKind = customDbNullPredicate ? DbNullPredicate.Custom : InferDbNullPredicate(type, isNullDefaultValue); + } + + /// + /// Whether this converter can handle the given format and with which buffer requirements. + /// + /// The data format. + /// Returns the buffer requirements. + /// Returns true if the given data format is supported. + /// The buffer requirements should not cover database NULL reads or writes, these are handled by the caller. + public abstract bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements); + + internal Type TypeToConvert { get; } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + PgConverter UnsafeAs() + { + // Justification: avoid perf cost of casting to a known base class type per dispatch call. + Debug.Assert(typeof(T) == TypeToConvert); + Debug.Assert(this is PgConverter); + return Unsafe.As>(this); + } + + /// Reads a value from the reader as . + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. 
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + public T Read(PgReader reader) + => typeof(T) != TypeToConvert + ? (T)ReadAsObject(reader) + : UnsafeAs().Read(reader); + + /// Asynchronously reads a value from the reader as . + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + if (typeof(T) != TypeToConvert) + { + var task = ReadAsObjectAsync(reader, cancellationToken); + return task.IsCompletedSuccessfully ? new((T)task.Result) : ReadAndUnboxAsync(task); + } + + return UnsafeAs().ReadAsync(reader, cancellationToken); + + [MethodImpl(MethodImplOptions.NoInlining)] + static async ValueTask ReadAndUnboxAsync(ValueTask task) + => (T)await task.ConfigureAwait(false); + } + + /// Writes a value to the writer. + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void Write(PgWriter writer, [DisallowNull] T value) + { + if (typeof(T) != TypeToConvert) + { + WriteAsObject(writer, value); + return; + } + UnsafeAs().Write(writer, value); + } + + /// Asynchronously writes a value to the writer. + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default) + => typeof(T) != TypeToConvert + ? WriteAsObjectAsync(writer, value, cancellationToken) + : UnsafeAs().WriteAsync(writer, value, cancellationToken); + + internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, object? 
writeState) + => DbNullPredicateKind switch + { + DbNullPredicate.Null => value is null, + DbNullPredicate.None => false, + DbNullPredicate.PolymorphicNull => value is null or DBNull, + // We do the null check to keep the NotNullWhen(false) invariant. + DbNullPredicate.Custom => IsDbNullValueAsObject(value, writeState) || (value is null && ThrowInvalidNullValue()), + _ => ThrowDbNullPredicateOutOfRange() + }; + + private protected abstract bool IsDbNullValueAsObject(object? value, object? writeState); + + internal abstract Size GetSizeAsObject(SizeContext context, object value, ref object? writeState); + + internal object ReadAsObject(PgReader reader) + => ReadAsObject(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + internal ValueTask ReadAsObjectAsync(PgReader reader, CancellationToken cancellationToken = default) + => ReadAsObject(async: true, reader, cancellationToken); + + // Shared sync/async abstract to reduce virtual method table size overhead and code size for each NpgsqlConverter instantiation. + internal abstract ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken); + + internal void WriteAsObject(PgWriter writer, object value) + => WriteAsObject(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + internal ValueTask WriteAsObjectAsync(PgWriter writer, object value, CancellationToken cancellationToken = default) + => WriteAsObject(async: true, writer, value, cancellationToken); + + // Shared sync/async abstract to reduce virtual method table size overhead and code size for each NpgsqlConverter instantiation. + internal abstract ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken); + + static DbNullPredicate InferDbNullPredicate(Type type, bool isNullDefaultValue) + => type == typeof(object) || type == typeof(DBNull) + ? DbNullPredicate.PolymorphicNull + : isNullDefaultValue + ? 
DbNullPredicate.Null + : DbNullPredicate.None; + + internal enum DbNullPredicate : byte + { + /// Never DbNull (struct types) + None, + /// DbNull when *user code* + Custom, + /// DbNull when value is null + Null, + /// DbNull when value is null or DBNull + PolymorphicNull + } + + [DoesNotReturn] + private protected void ThrowIORequired(Size bufferRequirement) + => throw new InvalidOperationException($"Buffer requirement '{bufferRequirement}' not respected for converter '{GetType().FullName}', expected no IO to be required."); + + private protected static bool ThrowInvalidNullValue() + => throw new ArgumentNullException("value", "Null value given for non-nullable type converter"); + + private protected bool ThrowDbNullPredicateOutOfRange() + => throw new UnreachableException($"Unknown case {DbNullPredicateKind.ToString()}"); +} + +public abstract class PgConverter : PgConverter +{ + private protected PgConverter(bool customDbNullPredicate) + : base(typeof(T), default(T) is null, customDbNullPredicate) { } + +#pragma warning disable CS0618 // Obsolete - delegates to ref overload for binary compat with existing overrides + protected virtual bool IsDbNullValue(T? value, object? writeState) + { + // The obsolete ref overload is kept around for binary compatibility on the signature, but + // mutating writeState during a null probe is no longer a supported behaviour. Detect the + // mutation via a local captured before the forward and throw — a violating override is a + // bug in the derived converter, not something to defend against here. + var originalWriteState = writeState; + var isDbNull = IsDbNullValue(value, ref writeState); + if (!ReferenceEquals(writeState, originalWriteState)) + ThrowHelper.ThrowInvalidOperationException( + $"{GetType().FullName} mutated writeState from its IsDbNullValue override. 
Override the overload without ref and produce write state only in GetSize."); + return isDbNull; + } +#pragma warning restore CS0618 + + [Obsolete("Use the overload without ref.")] + [EditorBrowsable(EditorBrowsableState.Never)] + protected virtual bool IsDbNullValue(T? value, ref object? writeState) => throw new NotSupportedException(); + + // Object null semantics as follows, if T is a struct (so excluding nullable) report false for null values, don't throw on the cast. + // As a result this creates symmetry with IsDbNull when we're dealing with a struct T, as it cannot be passed null at all. + private protected override bool IsDbNullValueAsObject(object? value, object? writeState) + => (default(T) is null || value is not null) && IsDbNullValue((T?)value, writeState); + + /// Checks whether is considered a database null by this converter. + public bool IsDbNull([NotNullWhen(false)] T? value, object? writeState) + => DbNullPredicateKind switch + { + DbNullPredicate.Null => value is null, + DbNullPredicate.None => false, + DbNullPredicate.PolymorphicNull => value is null or DBNull, + // We do the null check to keep the NotNullWhen(false) invariant. + DbNullPredicate.Custom => IsDbNullValue(value, writeState) || (value is null && ThrowInvalidNullValue()), + _ => ThrowDbNullPredicateOutOfRange() + }; + + [Obsolete("Use the overload without ref.")] + [EditorBrowsable(EditorBrowsableState.Never)] + public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) + => IsDbNull(value, writeState); + + /// Reads a value from the reader. + public abstract T Read(PgReader reader); + /// Asynchronously reads a value from the reader. + public abstract ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default); + + /// Computes the serialized size for , producing any required . + public abstract Size GetSize(SizeContext context, [DisallowNull]T value, ref object? writeState); + + /// Writes a value to the writer. 
+ public abstract void Write(PgWriter writer, [DisallowNull] T value); + /// Asynchronously writes a value to the writer. + public abstract ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default); + + internal sealed override Size GetSizeAsObject(SizeContext context, object value, ref object? writeState) + => GetSize(context, (T)value, ref writeState); +} + +static class PgConverterExtensions +{ + public static Size? IsDbNullOrGetSize(this PgConverter converter, DataFormat format, Size writeRequirement, T? value, ref object? writeState) + { + if (converter.IsDbNull(value, writeState)) + return null; + + if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) + return byteCount; + var size = converter.GetSize(new(format, writeRequirement), value, ref writeState); + + switch (size.Kind) + { + case SizeKind.UpperBound: + ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.UpperBound)} is not a valid return value for GetSize."); + break; + case SizeKind.Unknown: + // Not valid yet. + ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.Unknown)} is not a valid return value for GetSize."); + break; + } + + return size; + } + + public static Size? IsDbNullOrGetSizeAsObject(this PgConverter converter, DataFormat format, Size writeRequirement, object? value, ref object? writeState) + { + if (converter.IsDbNullAsObject(value, writeState)) + return null; + + if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) + return byteCount; + var size = converter.GetSizeAsObject(new(format, writeRequirement), value, ref writeState); + + switch (size.Kind) + { + case SizeKind.UpperBound: + ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.UpperBound)} is not a valid return value for GetSize."); + break; + case SizeKind.Unknown: + // Not valid yet. 
+ ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.Unknown)} is not a valid return value for GetSize."); + break; + } + + return size; + } +} + +[method: SetsRequiredMembers] +public readonly struct SizeContext(DataFormat format, Size bufferRequirement) +{ + public required Size BufferRequirement { get; init; } = bufferRequirement; + public DataFormat Format { get; } = format; +} + +class MultiWriteState : IDisposable +{ + public ArrayPool<(Size Size, object? WriteState)>? ArrayPool { get; set; } + public ArraySegment<(Size Size, object? WriteState)> Data { get; set; } + public bool AnyWriteState { get; set; } + + public void Dispose() + { + if (Data.Array is not { } array) + return; + + if (AnyWriteState) + { + for (var i = Data.Offset; i < Data.Offset + Data.Count; i++) + if (array[i].WriteState is IDisposable disposable) + disposable.Dispose(); + + Array.Clear(Data.Array, Data.Offset, Data.Count); + } + + ArrayPool?.Return(Data.Array); + } +} diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs new file mode 100644 index 0000000000..39b99b921a --- /dev/null +++ b/src/Npgsql/Internal/PgReader.cs @@ -0,0 +1,869 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Util; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public class PgReader +{ + const int DbNullSentinel = -1; + const int UninitializedSentinel = -1; + + // We don't want to add a ton of memory pressure for large strings. + internal const int MaxPreparedTextReaderSize = 1024 * 64; + + readonly NpgsqlReadBuffer _buffer; + + bool _resumable; + + byte[]? _pooledArray; + Stream? _userActiveStream; + PreparedTextReader? 
_preparedTextReader; + + long _fieldStartPos; + long _fieldEndPos; + Size _fieldBufferRequirement; + DataFormat _fieldFormat; + int _fieldSize; + + // This position is relative to _fieldStartPos, which is why it can be an int. + int _currentStartPos; + Size _currentBufferRequirement; + int _currentSize; + + // GetChars Internal state + TextReader? _getCharsReader; + int _getCharsRead; + + // GetChars User state + int? _charsReadOffset; + ArraySegment? _charsReadBuffer; + + bool _requiresCleanup; + + internal PgReader(NpgsqlReadBuffer buffer) + { + _buffer = buffer; + _fieldStartPos = UninitializedSentinel; + _currentSize = UninitializedSentinel; + } + + internal bool Initialized => _fieldStartPos is not UninitializedSentinel; + int FieldOffset => (int)(_buffer.CumulativeReadPosition - _fieldStartPos); + int FieldSize => _fieldSize; + int FieldRemaining => FieldSize - FieldOffset; + + internal bool FieldIsDbNull => FieldSize is DbNullSentinel; + internal bool FieldAtStart => FieldOffset is 0; + + internal bool IsFieldPastOffset(int offset) => FieldOffset > offset; + + // TODO refactor out + internal long GetFieldStartPos(NpgsqlNestedDataReader nestedDataReader) => _fieldStartPos; + // TODO refactor out + internal int GetFieldOffset(NpgsqlNestedDataReader nestedDataReader) => FieldOffset; + + internal bool NestedInitialized => _currentSize is not UninitializedSentinel; + int CurrentSize => NestedInitialized ? _currentSize : _fieldSize; + + public ValueMetadata Current => new() { Size = CurrentSize, Format = _fieldFormat, BufferRequirement = CurrentBufferRequirement }; + public int CurrentRemaining => NestedInitialized ? _currentSize - CurrentOffset : FieldRemaining; + + internal Size CurrentBufferRequirement => NestedInitialized ? 
_currentBufferRequirement : _fieldBufferRequirement; + int CurrentOffset => FieldOffset - _currentStartPos; + + internal bool Resumable => _resumable; + public bool IsResumed => Resumable && CurrentOffset > 0; + + internal bool StreamCanSeek { get; set; } + + ArrayPool ArrayPool => ArrayPool.Shared; + + // Here for testing purposes + internal void BreakConnection() => throw _buffer.Connector.Break(new Exception("Broken")); + + internal void Reset() + { + if (Initialized) + ThrowHelper.ThrowInvalidOperationException("Cannot reset an initialized reader."); + + StreamCanSeek = false; + } + + internal void RevertNestedReadScope(int size, int startPos, Size bufferRequirement) + { + if (startPos > FieldOffset) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(startPos), "Can't revert forwardly"); + + _currentStartPos = startPos; + _currentBufferRequirement = bufferRequirement; + _currentSize = size; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + void CheckBounds(int count) + { + if (_buffer.CumulativeReadPosition > _fieldEndPos - count) + Throw(); + + static void Throw() + => ThrowHelper.ThrowIndexOutOfRangeException("Attempt to read past the end of the field."); + } + + public byte ReadByte() + { + CheckBounds(sizeof(byte)); + var result = _buffer.ReadByte(); + return result; + } + + public short ReadInt16() + { + CheckBounds(sizeof(short)); + var result = _buffer.ReadInt16(); + return result; + } + + public int ReadInt32() + { + CheckBounds(sizeof(int)); + var result = _buffer.ReadInt32(); + return result; + } + + public long ReadInt64() + { + CheckBounds(sizeof(long)); + var result = _buffer.ReadInt64(); + return result; + } + + public ushort ReadUInt16() + { + CheckBounds(sizeof(ushort)); + var result = _buffer.ReadUInt16(); + return result; + } + + public uint ReadUInt32() + { + CheckBounds(sizeof(uint)); + var result = _buffer.ReadUInt32(); + return result; + } + + public ulong ReadUInt64() + { + CheckBounds(sizeof(ulong)); + var result = 
_buffer.ReadUInt64(); + return result; + } + + public float ReadFloat() + { + CheckBounds(sizeof(float)); + var result = _buffer.ReadSingle(); + return result; + } + + public double ReadDouble() + { + CheckBounds(sizeof(double)); + var result = _buffer.ReadDouble(); + return result; + } + + public void Read(Span destination) + { + CheckBounds(destination.Length); + _buffer.ReadBytes(destination); + } + + public async ValueTask ReadNullTerminatedStringAsync(Encoding encoding, CancellationToken cancellationToken = default) + { + var result = await _buffer.ReadNullTerminatedString(encoding, async: true, cancellationToken).ConfigureAwait(false); + // Can only check after the fact. + CheckBounds(0); + return result; + } + + public string ReadNullTerminatedString(Encoding encoding) + { + var result = _buffer.ReadNullTerminatedString(encoding, async: false, CancellationToken.None).GetAwaiter().GetResult(); + CheckBounds(0); + return result; + } + + public Stream GetStream(int? length = null) => GetStreamCore(length); + Stream GetStreamCore(int? length = null, bool untracked = false) + { + if (length > CurrentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(length), "Length is larger than the current remaining value size"); + + // This will cause any previously handed out StreamReaders etc to throw, as intended. + if (!untracked && UserStreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + + length ??= CurrentRemaining; + var len = length.GetValueOrDefault(); + CheckBounds(len); + + Stream stream; + if (StreamCanSeek && len <= _buffer.ReadBytesLeft) + { + // All data is in the buffer — return an isolated view over the buffer. 
+ stream = new SubReadStream(_buffer.Buffer, _buffer.ReadPosition, len); + _buffer.ReadPosition += len; + } + else + { + stream = _buffer.CreateStream(len, canSeek: false, consumeOnDispose: false); + } + + if (!untracked) + { + _requiresCleanup = true; + _userActiveStream = stream; + } + return stream; + } + + public TextReader GetTextReader(Encoding encoding) + => GetTextReader(async: false, encoding, CancellationToken.None).GetAwaiter().GetResult(); + + public ValueTask GetTextReaderAsync(Encoding encoding, CancellationToken cancellationToken) + => GetTextReader(async: true, encoding, cancellationToken); + + async ValueTask GetTextReader(bool async, Encoding encoding, CancellationToken cancellationToken, bool untracked = false) + { + if (CurrentRemaining > _buffer.ReadBytesLeft || CurrentRemaining > MaxPreparedTextReaderSize) + return new StreamReader(GetStreamCore(untracked: untracked), encoding, detectEncodingFromByteOrderMarks: false); + + if (!untracked && _preparedTextReader is { IsDisposed: false }) + { + _preparedTextReader.Dispose(); + _preparedTextReader = null; + } + + _requiresCleanup = true; + var currentOffset = CurrentOffset; + var currentRemaining = CurrentSize - currentOffset; + + // Always make a new reader for untracked usage, see GetStreamCore. + var preparedTextReader = (untracked ? null : _preparedTextReader) ?? new(); + preparedTextReader.Init(encoding.GetString(async + ? 
await ReadBytesAsync(currentRemaining, cancellationToken).ConfigureAwait(false) + : ReadBytes(currentRemaining))); + if (!untracked) + _preparedTextReader = preparedTextReader; + + return preparedTextReader; + } + + public ValueTask ReadBytesAsync(Memory buffer, CancellationToken cancellationToken = default) + { + var count = buffer.Length; + CheckBounds(count); + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) + { + _buffer.Buffer.AsSpan(offset, count).CopyTo(buffer.Span); + _buffer.ReadPosition += count; + return new(); + } + + return Slow(count, buffer, cancellationToken); + + async ValueTask Slow(int count, Memory buffer, CancellationToken cancellationToken) + { + var stream = _buffer.CreateStream(count, canSeek: false); + await using var _ = stream.ConfigureAwait(false); + await stream.ReadExactlyAsync(buffer, cancellationToken).ConfigureAwait(false); + } + } + + public void ReadBytes(Span buffer) + { + var count = buffer.Length; + CheckBounds(count); + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) + { + _buffer.Buffer.AsSpan(offset, count).CopyTo(buffer); + _buffer.ReadPosition += count; + return; + } + + Slow(count, buffer); + + void Slow(int count, Span buffer) + { + using var stream = _buffer.CreateStream(count, canSeek: false); + stream.ReadExactly(buffer); + } + } + + public bool TryReadBytes(int count, out ReadOnlySpan bytes) + { + CheckBounds(count); + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) + { + bytes = new ReadOnlySpan(_buffer.Buffer, offset, count); + _buffer.ReadPosition += count; + return true; + } + bytes = default; + return false; + } + + public bool TryReadBytes(int count, out ReadOnlyMemory bytes) + { + CheckBounds(count); + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) + { + bytes = new 
ReadOnlyMemory(_buffer.Buffer, offset, count); + _buffer.ReadPosition += count; + return true; + } + bytes = default; + return false; + } + + /// ReadBytes without memory management, the next read invalidates the underlying buffer(s), only use this for intermediate transformations. + public ReadOnlySequence ReadBytes(int count) + { + CheckBounds(count); + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) + { + var result = new ReadOnlySequence(_buffer.Buffer, offset, count); + _buffer.ReadPosition += count; + return result; + } + + var array = RentArray(count); + ReadBytes(array.AsSpan(0, count)); + return new(array, 0, count); + } + + /// ReadBytesAsync without memory management, the next read invalidates the underlying buffer(s), only use this for intermediate transformations. + public async ValueTask> ReadBytesAsync(int count, CancellationToken cancellationToken = default) + { + CheckBounds(count); + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) + { + var result = new ReadOnlySequence(_buffer.Buffer, offset, count); + _buffer.ReadPosition += count; + return result; + } + + var array = RentArray(count); + await ReadBytesAsync(array.AsMemory(0, count), cancellationToken).ConfigureAwait(false); + return new(array, 0, count); + } + + public void Rewind(int count) + { + if (CurrentOffset < count) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to rewind past the current field start."); + + if (_buffer.ReadPosition < count) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to rewind past the buffer start, some of this data is no longer part of the underlying buffer."); + + // Shut down any streaming going on on the column + if (UserStreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + + RewindCore(count); + } + + void RewindCore(int count) + { + Debug.Assert(CurrentOffset >= 
count); + Debug.Assert(_buffer.ReadPosition >= count); + _buffer.ReadPosition -= count; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + ValueTask DisposeUserActiveStream(bool async) + { + var stream = _userActiveStream; + if (stream is not null) + { + _userActiveStream = null; + if (async) + return stream.DisposeAsync(); + + stream.Dispose(); + } + + return new(); + } + + internal int GetCharsRead => _getCharsRead; + internal bool CharsReadActive => _charsReadOffset is not null; + + internal void GetCharsReadInfo(Encoding encoding, out int charsRead, out TextReader reader, out int charsOffset, out ArraySegment? buffer) + { + if (!CharsReadActive) + ThrowHelper.ThrowInvalidOperationException("No active chars read"); + + _requiresCleanup = true; + + charsRead = _getCharsRead; + reader = _getCharsReader ??= GetTextReader(async: false, encoding, default, untracked: true).GetAwaiter().GetResult(); + charsOffset = _charsReadOffset ?? 0; + buffer = _charsReadBuffer; + } + + internal void RestartCharsRead() + { + if (!CharsReadActive) + ThrowHelper.ThrowInvalidOperationException("No active chars read"); + + switch (_getCharsReader) + { + case PreparedTextReader reader: + reader.Restart(); + break; + case StreamReader reader: + reader.BaseStream.Seek(0, SeekOrigin.Begin); + reader.DiscardBufferedData(); + break; + } + _getCharsRead = 0; + } + + internal void AdvanceCharsRead(int charsRead) + { + _getCharsRead += charsRead; + } + + internal void StartCharsRead(int dataOffset, ArraySegment? 
buffer) + { + if (!Resumable) + ThrowHelper.ThrowInvalidOperationException("Reader was not initialized as resumable"); + + _charsReadOffset = dataOffset; + _charsReadBuffer = buffer; + } + + internal void EndCharsRead() + { + if (!Resumable) + ThrowHelper.ThrowInvalidOperationException("Wasn't initialized as resumed"); + + if (!CharsReadActive) + ThrowHelper.ThrowInvalidOperationException("No active chars read"); + + _charsReadOffset = null; + _charsReadBuffer = null; + } + + internal void Init(DataFormat fieldFormat, int fieldSize, bool resumable = false) + { + if (Initialized) + ThrowHelper.ThrowInvalidOperationException("Already initialized"); + + _fieldStartPos = _buffer.CumulativeReadPosition; + _fieldEndPos = _fieldStartPos + fieldSize; + _fieldSize = fieldSize; + _resumable = resumable; + _fieldFormat = fieldFormat; + } + + internal void StartRead(PgFieldBinding binding) + { + Debug.Assert(FieldSize >= 0); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(binding.BufferRequirement, FieldSize); + _fieldBufferRequirement = binding.BufferRequirement; + if (ShouldBuffer(byteCount)) + BufferNoInlined(byteCount); + + [MethodImpl(MethodImplOptions.NoInlining)] + void BufferNoInlined(int byteCount) + => Buffer(byteCount); + } + + internal ValueTask StartReadAsync(PgFieldBinding binding, CancellationToken cancellationToken) + { + Debug.Assert(FieldSize >= 0); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(binding.BufferRequirement, FieldSize); + _fieldBufferRequirement = binding.BufferRequirement; + return ShouldBuffer(byteCount) ? BufferAsync(byteCount, cancellationToken) : new(); + } + + internal void EndRead() + { + if (_resumable || (_requiresCleanup && UserStreamActive)) + return; + + if (_buffer.CumulativeReadPosition != _fieldEndPos) + { + // If it was upper bound we should consume. 
+ if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + { + Consume(FieldRemaining); + return; + } + + ThrowNotConsumedExactly(); + } + } + + internal ValueTask EndReadAsync() + { + if (_resumable || (_requiresCleanup && UserStreamActive)) + return new(); + + if (_buffer.CumulativeReadPosition != _fieldEndPos) + { + // If it was upper bound we should consume. + if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + return ConsumeAsync(FieldRemaining); + + ThrowNotConsumedExactly(); + } + + return new(); + } + + internal async ValueTask BeginNestedRead(bool async, int size, Size bufferRequirement, CancellationToken cancellationToken = default) + { + if (size > CurrentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(size), "Cannot begin a read for a larger size than the current remaining size."); + + if (size < 0) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(size), "Cannot be negative"); + + var previousSize = CurrentSize; + var previousStartPos = _currentStartPos; + var previousBufferRequirement = CurrentBufferRequirement; + _currentSize = size; + _currentBufferRequirement = bufferRequirement; + _currentStartPos = FieldOffset; + + var byteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, size); + if (ShouldBuffer(byteCount)) + await Buffer(async, byteCount, cancellationToken).ConfigureAwait(false); + return new NestedReadScope(async, this, previousSize, previousStartPos, previousBufferRequirement); + } + + public NestedReadScope BeginNestedRead(int size, Size bufferRequirement) + => BeginNestedRead(async: false, size, bufferRequirement, CancellationToken.None).GetAwaiter().GetResult(); + + public ValueTask BeginNestedReadAsync(int size, Size bufferRequirement, CancellationToken cancellationToken = default) + => BeginNestedRead(async: true, size, bufferRequirement, cancellationToken); + + /// Seek origin is the start of Current, e.g. Seek(0) rewinds to the start. 
+ internal void Seek(int offset) + { + var currentOffset = CurrentOffset; + if (currentOffset > offset) + Rewind(currentOffset - offset); + else if (currentOffset < offset) + Consume(offset - currentOffset); + } + + public void Consume(int? count = null) + { + if (count <= 0 || FieldSize < 0 || FieldRemaining == 0) + return; + + var currentRemaining = CurrentRemaining; + var remaining = count ?? currentRemaining; + + if (count > currentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to read past the end of the current field size."); + + if (UserStreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + + var origOffset = FieldOffset; + // A breaking exception unwind from a nested scope should not try to consume its remaining data. + if (!_buffer.Connector.IsBroken) + _buffer.Skip(remaining, allowIO: true); + + Debug.Assert(FieldRemaining == FieldSize - origOffset - remaining); + } + + public async ValueTask ConsumeAsync(int? count = null, CancellationToken cancellationToken = default) + { + if (count <= 0 || FieldSize < 0 || FieldRemaining == 0) + return; + + var currentRemaining = CurrentRemaining; + var remaining = count ?? currentRemaining; + + if (count > currentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to read past the end of the current field size."); + + if (UserStreamActive) + await DisposeUserActiveStream(async: true).ConfigureAwait(false); + + var origOffset = FieldOffset; + // A breaking exception unwind from a nested scope should not try to consume its remaining data. 
+ if (!_buffer.Connector.IsBroken) + await _buffer.Skip(async: true, remaining).ConfigureAwait(false); + + Debug.Assert(FieldRemaining == FieldSize - origOffset - remaining); + } + + [MemberNotNullWhen(true, nameof(_userActiveStream))] + bool UserStreamActive => _userActiveStream switch + { + NpgsqlReadBuffer.ColumnStream { IsDisposed: false } => true, + SubReadStream { IsDisposed: false } => true, + _ => false + }; + [MethodImpl(MethodImplOptions.NoInlining)] + void Cleanup() + { + if (UserStreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + + if (_pooledArray is not null) + { + ArrayPool.Return(_pooledArray); + _pooledArray = null; + } + + if (_getCharsReader is not null) + { + _getCharsReader.Dispose(); + _getCharsReader = null; + _getCharsRead = default; + } + + if (_preparedTextReader is not null) + { + _preparedTextReader.Dispose(); + _preparedTextReader = null; + } + + _requiresCleanup = false; + } + + void ResetCurrent() + { + _currentStartPos = 0; + _currentBufferRequirement = default; + _currentSize = UninitializedSentinel; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal int Restart(bool resumable) + { + if (!Initialized) + ThrowHelper.ThrowInvalidOperationException("Cannot restart a non-initialized reader."); + + // We resume if the reader was initialized as resumable and we're not explicitly restarting as non-resumable. + // When the field size is DbNullSentinel (i.e. -1) we're always restarting as resumable, to allow rereading null values endlessly. + var fieldSize = FieldSize; + if ((Resumable && resumable) || fieldSize is DbNullSentinel) + { + _resumable = true; + return fieldSize; + } + + // From this point on we're not resuming, we're resetting any previous converter state and rewinding our position. 
+ + if (NestedInitialized) + ResetCurrent(); + + _resumable = resumable; + RewindCore(FieldOffset); + + Debug.Assert(Initialized); + return fieldSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void Commit() + { + if (!Initialized) + return; + + // Shut down any streaming and pooling going on on the column. + if (_requiresCleanup) + Cleanup(); + + if (NestedInitialized) + ResetCurrent(); + + // We make sure to fuly consume any FieldRemaining in the event of an exception or a nested scope not being disposed. + Debug.Assert(!NestedInitialized); + if (FieldRemaining > 0) + Consume(); + + _fieldStartPos = UninitializedSentinel; + Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldEndPos = default; + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal ValueTask CommitAsync() + { + if (!Initialized) + return new(); + + // Shut down any streaming and pooling going on on the column. + if (_requiresCleanup) + Cleanup(); + + if (NestedInitialized) + ResetCurrent(); + + // We make sure to fuly consume any FieldRemaining in the event of an exception or a nested scope not being disposed. 
+ Debug.Assert(!NestedInitialized); + if (FieldRemaining > 0) + return CommitAsync(); + + _fieldStartPos = UninitializedSentinel; + Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldEndPos = default; + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + + return new(); + + async ValueTask CommitAsync() + { + await ConsumeAsync().ConfigureAwait(false); + + _fieldStartPos = UninitializedSentinel; + Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldEndPos = default; + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + } + } + + byte[] RentArray(int count) + { + _requiresCleanup = true; + var pooledArray = _pooledArray; + if (pooledArray is not null) + { + if (pooledArray.Length >= count) + return pooledArray; + ArrayPool.Return(pooledArray); + } + var array = _pooledArray = ArrayPool.Rent(count); + return array; + } + + // We check FieldAtStart to speed up simple value reads, as field level buffering was handled by reader.StartRead() already. 
+ internal bool ShouldBufferCurrent() + => !FieldAtStart && ShouldBuffer(BufferRequirements.GetMinimumBufferByteCount(CurrentBufferRequirement, CurrentRemaining)); + + public bool ShouldBuffer(int byteCount) + { + return _buffer.ReadBytesLeft < byteCount && ShouldBufferSlow(byteCount); + + [MethodImpl(MethodImplOptions.NoInlining)] + bool ShouldBufferSlow(int byteCount) + { + if (byteCount > _buffer.Size) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(byteCount), + "Buffer requirement is larger than the buffer size, this can never succeed by buffering data but requires a larger buffer size instead."); + if (byteCount > CurrentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(byteCount), + "Buffer requirement is larger than the remaining length of the value, make sure the value is always at least this size or use an upper bound requirement instead."); + + return true; + } + } + + public void Buffer(int byteCount) => _buffer.Ensure(byteCount); + + public ValueTask BufferAsync(int byteCount, CancellationToken cancellationToken) => _buffer.EnsureAsync(byteCount); + + internal ValueTask Buffer(bool async, int byteCount, CancellationToken cancellationToken) + { + if (async) + return BufferAsync(byteCount, cancellationToken); + + Buffer(byteCount); + return new(); + } + + void ThrowNotConsumedExactly() => + throw _buffer.Connector.Break( + new InvalidOperationException( + FieldOffset < FieldSize + ? 
$"The read on this field has not consumed all of its bytes (pos: {FieldOffset}, len: {FieldSize})" + : $"The read on this field has consumed all of its bytes and read into the subsequent bytes (pos: {FieldOffset}, len: {FieldSize})")); +} + +public readonly struct NestedReadScope : IDisposable, IAsyncDisposable +{ + readonly PgReader _reader; + readonly int _previousSize; + readonly int _previousStartPos; + readonly Size _previousBufferRequirement; + readonly bool _async; + + internal NestedReadScope(bool async, PgReader reader, int previousSize, int previousStartPos, Size previousBufferRequirement) + { + _async = async; + _reader = reader; + _previousSize = previousSize; + _previousStartPos = previousStartPos; + _previousBufferRequirement = previousBufferRequirement; + } + + public void Dispose() + { + if (_async) + ThrowHelper.ThrowInvalidOperationException("Cannot synchronously dispose async scopes, call DisposeAsync instead."); + DisposeAsync().GetAwaiter().GetResult(); + } + + public ValueTask DisposeAsync() + { + if (_reader.CurrentRemaining > 0) + { + if (_async) + return AsyncCore(_reader, _previousSize, _previousStartPos, _previousBufferRequirement); + + _reader.Consume(); + } + _reader.RevertNestedReadScope(_previousSize, _previousStartPos, _previousBufferRequirement); + return new(); + + static async ValueTask AsyncCore(PgReader reader, int previousSize, int previousStartPos, Size previousBufferRequirement) + { + await reader.ConsumeAsync().ConfigureAwait(false); + reader.RevertNestedReadScope(previousSize, previousStartPos, previousBufferRequirement); + } + } +} diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs new file mode 100644 index 0000000000..da6a5722b9 --- /dev/null +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -0,0 +1,160 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Text; +using Npgsql.Internal.Postgres; +using Npgsql.NameTranslation; +using 
Npgsql.PostgresTypes; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public sealed class PgSerializerOptions +{ + internal static UTF8Encoding DefaultUtf8Encoding => NpgsqlWriteBuffer.UTF8Encoding; + + /// + /// Used by GetSchema to be able to attempt to resolve all type catalog types without exceptions. + /// + [field: ThreadStatic] + internal static bool IntrospectionCaller { get; set; } + + readonly PgTypeInfoResolverChain _resolverChain; + readonly Func? _timeZoneProvider; + IPgTypeInfoResolver? _typeInfoResolver; + object? _typeInfoCache; + + internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, PgTypeInfoResolverChain? resolverChain = null, Func? timeZoneProvider = null) + { + _resolverChain = resolverChain ?? new(); + _timeZoneProvider = timeZoneProvider; + DatabaseInfo = databaseInfo; + UnspecifiedDBNullTypeInfo = new(this, new Converters.Internal.VoidConverter(), DataTypeName.Unspecified, requestedType: typeof(DBNull)); + } + + internal PgConcreteTypeInfo UnspecifiedDBNullTypeInfo { get; } + + PostgresType? _textPgType; + internal PgTypeId TextPgTypeId => ToCanonicalTypeId(_textPgType ??= DatabaseInfo.GetPostgresType(DataTypeNames.Text)); + + // Used purely for type mapping, where we don't have a full set of types but resolvers might know enough. + readonly bool _introspectionInstance; + internal bool IntrospectionMode + { + get => _introspectionInstance || IntrospectionCaller; + init => _introspectionInstance = value; + } + + /// Whether options should return a portable identifier (data type name) to prevent any generated id (oid) confusion across backends, this comes with a perf penalty. + internal bool PortableTypeIds { get; init; } + internal NpgsqlDatabaseInfo DatabaseInfo { get; } + + public string TimeZone => _timeZoneProvider?.Invoke() ?? 
throw new NotSupportedException("TimeZone was not configured."); + public Encoding TextEncoding { get; init; } = NpgsqlWriteBuffer.RelaxedUTF8Encoding; + public IPgTypeInfoResolver TypeInfoResolver + { + get => _typeInfoResolver ??= new ChainTypeInfoResolver(_resolverChain); + internal init => _typeInfoResolver = value; + } + public bool EnableDateTimeInfinityConversions { get; init; } = true; + + public ArrayNullabilityMode ArrayNullabilityMode { get; init; } = ArrayNullabilityMode.Never; + public INpgsqlNameTranslator DefaultNameTranslator { get; init; } = NpgsqlSnakeCaseNameTranslator.Instance; + + public static bool IsWellKnownTextType(Type type) + { + type = type.IsValueType ? Nullable.GetUnderlyingType(type) ?? type : type; + return Array.IndexOf([ + typeof(string), typeof(char), + typeof(char[]), typeof(ReadOnlyMemory), typeof(ArraySegment), + typeof(byte[]), typeof(ReadOnlyMemory) + ], type) != -1 || typeof(Stream).IsAssignableFrom(type); + } + + internal bool RangesEnabled => _resolverChain.RangesEnabled; + internal bool MultirangesEnabled => _resolverChain.MultirangesEnabled; + internal bool ArraysEnabled => _resolverChain.ArraysEnabled; + + // We don't verify the kind of pgTypeId we get, it'll throw if it's incorrect. + // It's up to the caller to call GetCanonicalTypeId if they want to use an oid instead of a DataTypeName. + // This also makes it easier to realize it should be a cached value if infos for different CLR types are requested for the same + // pgTypeId. Effectively it should be 'impossible' to get the wrong kind via any PgConverterOptions api which is what this is mainly + // for. + PgTypeInfo? GetTypeInfoCore(Type? type, PgTypeId? pgTypeId) + => PortableTypeIds + ? ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.DataTypeName) + : ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.Oid); + + internal PgTypeInfo? GetTypeInfoInternal(Type? type, PgTypeId? 
pgTypeId) + => GetTypeInfoCore(type, pgTypeId); + + public PgTypeInfo? GetDefaultTypeInfo(Type type) + => GetTypeInfoCore(type, null); + + public PgTypeInfo? GetDefaultTypeInfo(PgTypeId pgTypeId) + => GetTypeInfoCore(null, GetCanonicalTypeId(pgTypeId)); + + public PgTypeInfo? GetTypeInfo(Type type, PgTypeId pgTypeId) + => GetTypeInfoCore(type, GetCanonicalTypeId(pgTypeId)); + + // If a given type id is in the opposite form than what was expected it will be mapped according to the requirement. + internal PgTypeId GetCanonicalTypeId(PgTypeId pgTypeId) + => PortableTypeIds ? DatabaseInfo.GetDataTypeName(pgTypeId) : DatabaseInfo.GetOid(pgTypeId); + + // If a given type id is in the opposite form than what was expected it will be mapped according to the requirement. + internal PgTypeId ToCanonicalTypeId(PostgresType pgType) + => PortableTypeIds ? pgType.DataTypeName : (Oid)pgType.OID; + + public PgTypeId GetArrayTypeId(PgTypeId elementTypeId) + { + // Static affordance to help the global type mapper. + if (PortableTypeIds && elementTypeId.IsDataTypeName) + return elementTypeId.DataTypeName.ToArrayName(); + + return ToCanonicalTypeId(DatabaseInfo.GetPostgresType(elementTypeId).Array + ?? throw new NotSupportedException("Cannot resolve array type id")); + } + + public PgTypeId GetArrayElementTypeId(PgTypeId arrayTypeId) + { + // Static affordance to help the global type mapper. + if (PortableTypeIds && arrayTypeId.IsDataTypeName && arrayTypeId.DataTypeName.UnqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal)) + return new DataTypeName(arrayTypeId.DataTypeName.Schema + arrayTypeId.DataTypeName.UnqualifiedNameSpan.Slice(1).ToString()); + + return ToCanonicalTypeId((DatabaseInfo.GetPostgresType(arrayTypeId) as PostgresArrayType)?.Element + ?? throw new NotSupportedException("Cannot resolve array element type id")); + } + + public PgTypeId GetRangeTypeId(PgTypeId subtypeTypeId) => + ToCanonicalTypeId(DatabaseInfo.GetPostgresType(subtypeTypeId).Range + ?? 
throw new NotSupportedException("Cannot resolve range type id")); + + public PgTypeId GetRangeSubtypeTypeId(PgTypeId rangeTypeId) => + ToCanonicalTypeId((DatabaseInfo.GetPostgresType(rangeTypeId) as PostgresRangeType)?.Subtype + ?? throw new NotSupportedException("Cannot resolve range subtype type id")); + + public PgTypeId GetMultirangeTypeId(PgTypeId rangeTypeId) => + ToCanonicalTypeId((DatabaseInfo.GetPostgresType(rangeTypeId) as PostgresRangeType)?.Multirange + ?? throw new NotSupportedException("Cannot resolve multirange type id")); + + public PgTypeId GetMultirangeElementTypeId(PgTypeId multirangeTypeId) => + ToCanonicalTypeId((DatabaseInfo.GetPostgresType(multirangeTypeId) as PostgresMultirangeType)?.Subrange + ?? throw new NotSupportedException("Cannot resolve multirange element type id")); + + public bool TryGetDataTypeName(PgTypeId pgTypeId, out DataTypeName dataTypeName) + { + if (DatabaseInfo.FindPostgresType(pgTypeId) is { } pgType) + { + dataTypeName = pgType.DataTypeName; + return true; + } + + dataTypeName = default; + return false; + } + + public DataTypeName GetDataTypeName(PgTypeId pgTypeId) + => !TryGetDataTypeName(pgTypeId, out var name) + ? 
throw new ArgumentException("Unknown type id", nameof(pgTypeId)) + : name; +} diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs new file mode 100644 index 0000000000..951e940fd8 --- /dev/null +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgStreamingConverter(bool customDbNullPredicate = false) : PgConverter(customDbNullPredicate) +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary; + } + + // Workaround for trimming https://github.com/dotnet/runtime/issues/92850#issuecomment-1744521361 + internal Task? ReadAsyncAsTask(PgReader reader, CancellationToken cancellationToken, out T result) + { + var task = ReadAsync(reader, cancellationToken); + if (task.IsCompletedSuccessfully) + { + result = task.Result; + return null; + } + result = default!; + return task.AsTask(); + } + + internal sealed override unsafe ValueTask ReadAsObject( + bool async, PgReader reader, CancellationToken cancellationToken) + { + if (!async) + return new(Read(reader)!); + + var task = ReadAsync(reader, cancellationToken); + return task.IsCompletedSuccessfully + ? new(task.Result!) + : PgStreamingConverterHelpers.AwaitTask(task.AsTask(), new(this, &BoxResult)); + + static object BoxResult(Task task) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
+ return new ValueTask(task: Unsafe.As>(task)).Result!; + } + } + + internal sealed override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + if (async) + return WriteAsync(writer, (T)value, cancellationToken); + + Write(writer, (T)value); + return new(); + } +} + +// Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is +// passed along. As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're +// done. +// The alternatives are: +// 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). +// 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse +// as well. +static class PgStreamingConverterHelpers +{ + // Split out from the generic class to amortize the huge size penalty per async state machine, which would otherwise be per + // instantiation. + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + public static async ValueTask AwaitTask(Task task, Continuation continuation) + { + await task.ConfigureAwait(false); + var result = continuation.Invoke(task); + // Guarantee the type stays loaded until the function pointer call is done. + GC.KeepAlive(continuation.Handle); + return result; + } + + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent + // mistakes. + public readonly unsafe struct Continuation + { + public object Handle { get; } + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. 
+ /// The continuation + public Continuation(object handle, delegate* continuation) + { + Handle = handle; + _continuation = continuation; + } + + public object Invoke(Task task) => _continuation(task); + } +} diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs new file mode 100644 index 0000000000..97417eb852 --- /dev/null +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -0,0 +1,469 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using Npgsql.Util; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgTypeInfo +{ + PgTypeInfo(PgSerializerOptions options, Type type, Type? requestedType) + { + Options = options; + + HasExactType = requestedType is null || requestedType == type; + Type = requestedType is null ? type : GetReportedType(type, requestedType) ?? type; + } + + private protected PgTypeInfo(PgSerializerOptions options, Type type, PgTypeId? pgTypeId, Type? requestedType = null) + : this(options, type, requestedType) + => PgTypeId = pgTypeId is { } id ? options.GetCanonicalTypeId(id) : null; + + private protected PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? requestedType = null) + : this(options, converter.TypeToConvert, pgTypeId, requestedType) {} + + public Type Type { get; } + public PgSerializerOptions Options { get; } + + // True when the reported type matches the converter's type exactly (no reported type given at construction, or + // the given reported type equals the converter type). When false, the reported type is a widening of the converter + // type (e.g. 
Array/Stream base-type reporting, enum-underlying widening) and the caller must dispatch through the + // info — the info routes reference-variance cases through the object APIs and layout-identity cases (enum) through + // the typed path with Unsafe.As, as appropriate for the widening kind. + // Having a single converter cover multiple reported types (Arrays, Streams) reduces the number of generic + // instantiations that need to be compiled for AOT. + internal bool HasExactType { get; } + + public PgTypeId? PgTypeId { get; } + + /// + /// Makes a for the given field. + /// + /// The field whose metadata drives the concrete type info selection. + /// The to use for the field. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the field's metadata (e.g. ) to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForField(Field field) + { + if (this is PgConcreteTypeInfo concrete) + return concrete; + + // Decided providers skip GetDefault's validation. The prior GetForField call already validated + // the id. Undecided providers thread it so GetDefaultCore can dispatch on it. + var providerTypeInfo = (PgProviderTypeInfo)this; + return providerTypeInfo.GetForField(field) + ?? providerTypeInfo.GetDefault(providerTypeInfo.PgTypeId is null ? field.PgTypeId : null); + } + + /// + /// Makes a for the given value. + /// + /// The value whose content drive the concrete type info selection. + /// Contains any write state that was produced. + /// The CLR type of the value. + /// The to use for the value. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValue(T? value, out object? 
writeState) + => MakeConcreteForValue(default, value, out writeState); + + /// + /// Makes a for the given value, with an explicit provider context. + /// + /// The context used when this instance is a provider based info. + /// The value whose content drives the concrete type info selection. + /// Contains any write state that was produced. + /// The CLR type of the value. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value and the supplied context to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValue(ProviderValueContext context, T? value, out object? writeState) + { + if (this is PgConcreteTypeInfo concrete) + { + writeState = null; + return concrete; + } + + // Make sure we handle the non-exact typed provider case. + // This will never cause boxing as non-exact typed infos only happen for subtype relationships, i.e. reference types. + // We make sure to fall through to GetForValue which has a better error if T is not at all related to this info. + var providerTypeInfo = (PgProviderTypeInfo)this; + var concreteTypeInfo = PgProviderTypeInfo.GetProvider(providerTypeInfo) is not PgConcreteTypeInfoProvider && providerTypeInfo.Type == typeof(T) + ? providerTypeInfo.GetForValueAsObject(context, (object?)value, out writeState) + : providerTypeInfo.GetForValue(context, value, out writeState); + + // Decided providers skip GetDefault's validation. The prior GetForValue call already validated + // the id. Undecided providers thread it so GetDefaultCore can dispatch on it. + return concreteTypeInfo ?? providerTypeInfo.GetDefault(providerTypeInfo.PgTypeId is null ? context.ExpectedPgTypeId : null); + } + + /// + /// Makes a for the given object value. + /// + /// The untyped value whose content drives the concrete type info selection. + /// Contains any write state that was produced. + /// The to use for the value. 
+ /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValueAsObject(object? value, out object? writeState) + => MakeConcreteForValueAsObject(default, value, out writeState); + + /// + /// Makes a for the given object value. + /// + /// The context used when this instance is a provider based info. + /// The untyped value whose content drives the concrete type info selection. + /// Contains any write state that was produced. + /// The to use for the value. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValueAsObject(ProviderValueContext context, object? value, out object? writeState) + { + if (this is PgConcreteTypeInfo concrete) + { + writeState = null; + return concrete; + } + + // Decided providers skip GetDefault's validation. The prior GetForValueAsObject call already validated + // the id. Undecided providers thread it so GetDefaultCore can dispatch on it. + var providerTypeInfo = (PgProviderTypeInfo)this; + return providerTypeInfo.GetForValueAsObject(context, value, out writeState) + ?? providerTypeInfo.GetDefault(providerTypeInfo.PgTypeId is null ? context.ExpectedPgTypeId : null); + } + + // Having it here so we can easily extend any behavior. + internal void DisposeWriteState(object writeState) + { + if (writeState is IDisposable disposable) + disposable.Dispose(); + } + + /// + /// Returns when it is a strict subtype of , otherwise null. + /// Throws when the two are not in a subtype relationship. + /// + protected static Type? 
GetReportedType(Type converterType, Type requestedType) + { + if (!requestedType.IsInSubtypeRelationshipWith(converterType)) + throw new ArgumentException($"The requested type {requestedType} is not in a subtype relationship with the converter's type {converterType}.", nameof(requestedType)); + + return requestedType != converterType && requestedType.IsAssignableTo(converterType) ? requestedType : null; + } +} + +public sealed class PgProviderTypeInfo : PgTypeInfo +{ + readonly PgConcreteTypeInfoProvider _typeInfoProvider; + readonly PgConcreteTypeInfo? _defaultConcrete; + + public PgProviderTypeInfo(PgSerializerOptions options, PgConcreteTypeInfoProvider typeInfoProvider, PgTypeId? pgTypeId) + : this(options, typeInfoProvider, pgTypeId, requestedType: null) + {} + + internal PgProviderTypeInfo(PgSerializerOptions options, PgConcreteTypeInfoProvider typeInfoProvider, PgTypeId? pgTypeId, Type? requestedType) + : base(options, typeInfoProvider.TypeToConvert, pgTypeId, requestedType) + { + _typeInfoProvider = typeInfoProvider; + + // Always validate the default provider result, the info will be re-used so there is no real downside. + var result = typeInfoProvider.GetDefault(pgTypeId is { } id ? options.GetCanonicalTypeId(id) : null); + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetDefault), result, typeInfoProvider.TypeToConvert, options.PortableTypeIds); + _defaultConcrete = result; + } + + public PgConcreteTypeInfo GetDefault(PgTypeId? pgTypeId) + { + if (pgTypeId is { } id && PgTypeId is { } decidedId) + { + if (id != decidedId) + ThrowUnexpectedPgTypeId(nameof(pgTypeId)); + + Debug.Assert(_defaultConcrete is not null); + return _defaultConcrete; + } + + var result = _typeInfoProvider.GetDefault(pgTypeId ?? PgTypeId); + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetDefault), result); + return result; + } + + public PgConcreteTypeInfo? 
GetForField(Field field) + { + if (PgTypeId is { } decidedId && field.PgTypeId != decidedId) + ThrowUnexpectedPgTypeId(nameof(field)); + + var result = _typeInfoProvider.GetForField(field); + if (result is not null) + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetForField), result); + return result; + } + + public PgConcreteTypeInfo? GetForValue(ProviderValueContext context, T? value, out object? writeState) + { + if (PgTypeId is { } pgTypeId) + { + if (context.ExpectedPgTypeId is not { } expectedId) + { + context = context with { ExpectedPgTypeId = pgTypeId }; + } + else if (pgTypeId != expectedId) + ThrowUnexpectedPgTypeId(nameof(context.ExpectedPgTypeId)); + } + + writeState = null; + var result = _typeInfoProvider is PgConcreteTypeInfoProvider providerT + ? providerT.GetForValue(context, value, ref writeState) + : ThrowNotSupportedType(typeof(T)); + + if (result is not null) + ValidateResult(nameof(PgConcreteTypeInfoProvider<>.GetForValue), result); + return result; + + PgConcreteTypeInfo ThrowNotSupportedType(Type? type) + => throw new NotSupportedException(type == Type + ? $"PgProviderTypeInfo does not exactly match type {type}, call {nameof(GetForValueAsObject)} instead." + : $"PgProviderTypeInfo is incompatible with type {type}"); + } + + public PgConcreteTypeInfo? GetForValueAsObject(ProviderValueContext context, object? value, out object? 
writeState) + { + if (PgTypeId is { } pgTypeId) + { + if (context.ExpectedPgTypeId is not { } expectedId) + { + context = context with { ExpectedPgTypeId = pgTypeId }; + } + else if (pgTypeId != expectedId) + ThrowUnexpectedPgTypeId(nameof(context.ExpectedPgTypeId)); + } + + writeState = null; + var result = _typeInfoProvider.GetForValueAsObject(context, value, ref writeState); + if (result is not null) + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetForValueAsObject), result); + return result; + } + + public static PgConcreteTypeInfoProvider GetProvider(PgProviderTypeInfo instance) => instance._typeInfoProvider; + + static void ThrowUnexpectedPgTypeId(string parameterName) + => throw new ArgumentException($"PgTypeId does not match the decided value on this {nameof(PgProviderTypeInfo)}.", parameterName); + + void ValidateResult(string methodName, PgConcreteTypeInfo result) + => ValidateResult(methodName, result, _typeInfoProvider.TypeToConvert, Options.PortableTypeIds); + + static void ValidateResult(string methodName, PgConcreteTypeInfo result, Type expectedTypeToConvert, bool expectPortableTypeIds) + { + if (expectedTypeToConvert != typeof(object) && result.Converter.TypeToConvert != expectedTypeToConvert) + throw new InvalidOperationException($"'{methodName}' returned a {nameof(result.Converter)} of type {result.Converter.TypeToConvert} instead of {expectedTypeToConvert} unexpectedly."); + + if (expectPortableTypeIds && result.PgTypeId.IsOid || !expectPortableTypeIds && result.PgTypeId.IsDataTypeName) + throw new InvalidOperationException($"'{methodName}' returned a concrete type info with a {nameof(result.PgTypeId)} that was not in canonical form."); + } +} + +public sealed class PgConcreteTypeInfo : PgTypeInfo +{ + readonly bool _canBinaryConvert; + readonly BufferRequirements _binaryBufferRequirements; + + readonly bool _canTextConvert; + readonly BufferRequirements _textBufferRequirements; + + public PgConcreteTypeInfo(PgSerializerOptions options, 
PgConverter converter, PgTypeId pgTypeId) + : this(options, converter, pgTypeId, requestedType: null) + {} + + internal PgConcreteTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? requestedType) + : base(options, converter, pgTypeId, requestedType) + { + Converter = converter; + _canBinaryConvert = converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); + _canTextConvert = converter.CanConvert(DataFormat.Text, out _textBufferRequirements); + + SupportsReading = GetDefaultSupportsReading(converter.TypeToConvert, requestedType); + SupportsWriting = true; + } + + Type TypeToConvert + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => Converter.TypeToConvert; + } + + public PgConverter Converter { get; } + + public bool SupportsReading { get; init; } + public bool SupportsWriting { get; init; } + + // We assume a non-exact typed info does not support reading as the converter won't be able to produce the derived type statically. + // Cases like Array converters reading int[], int[,] etc. are the exception and the reason why SupportsReading is a settable property. + internal static bool GetDefaultSupportsReading(Type type, Type? requestedType) + => requestedType is null || GetReportedType(type, requestedType) is not { } reportedType || reportedType == type; + + public DataFormat? 
PreferredFormat { get; init; } + public new PgTypeId PgTypeId => base.PgTypeId.GetValueOrDefault(); + + internal bool CanReadTo(Type type) => Type == type || (!HasExactType && Type.IsAssignableTo(type)); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal T ReadFieldValue(PgReader reader, in PgFieldBinding binding) + { + reader.StartRead(binding); + var result = Converter.Read(reader); + reader.EndRead(); + return result; + } + + internal async ValueTask ReadFieldValueAsync(PgReader reader, PgFieldBinding binding, CancellationToken cancellationToken) + { + await reader.StartReadAsync(binding, cancellationToken).ConfigureAwait(false); + + // Inline copy of Converter.ReadAsync to keep everything in one async frame. + var result = typeof(T) != TypeToConvert + ? (T)await Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await Unsafe.As>(Converter).ReadAsync(reader, cancellationToken).ConfigureAwait(false); + + await reader.EndReadAsync().ConfigureAwait(false); + return result; + } + + // TryBind for reading. + internal bool TryBindField(DataFormat format, out PgFieldBinding binding) + { + if (!Converter.CanConvert(format, out var bufferRequirements)) + { + binding = default; + return false; + } + binding = new(format, bufferRequirements.Read); + return true; + } + + // Bind for reading. + internal PgFieldBinding BindField(DataFormat format) + { + if (!TryBindField(format, out var info)) + ThrowHelper.ThrowInvalidOperationException($"Converter does not support {format} format."); + + return info; + } + + // Bind for writing. + /// When result is null, the value was interpreted to be a SQL NULL. + internal PgValueBinding BindParameterValue(T? value, object? writeState, DataFormat? 
formatPreference = null) + { + if (typeof(T) != TypeToConvert) + return BindParameterObjectValue(value, writeState, formatPreference); + + // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. + if (!SupportsWriting) + ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); + + var format = ResolveFormat(out var bufferRequirements, formatPreference ?? PreferredFormat); + + Debug.Assert(Converter is PgConverter); + if (Unsafe.As>(Converter).IsDbNullOrGetSize(format, bufferRequirements.Write, value, ref writeState) is not { } size) + return new(format, bufferRequirements.Write, null, null); + + return new(format, bufferRequirements.Write, size, writeState); + } + + // Bind for writing. + // Note: this api is not called BindAsObject as the semantics are extended, DBNull is a NULL value for all object values. + /// When result is null or DBNull, the value was interpreted to be a SQL NULL. + internal PgValueBinding BindParameterObjectValue(object? value, object? writeState, DataFormat? formatPreference = null) + { + // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. + if (!SupportsWriting) + ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); + + var format = ResolveFormat(out var bufferRequirements, formatPreference ?? PreferredFormat); + + // Given SQL values are effectively a union of T | NULL we support DBNull.Value to signify a NULL value for all types except DBNull in this api. 
+ if (value is DBNull && Type != typeof(DBNull) || Converter.IsDbNullOrGetSizeAsObject(format, bufferRequirements.Write, value, ref writeState) is not { } size) + { + return new(format, bufferRequirements.Write, null, null); + } + + return new(format, bufferRequirements.Write, size, writeState); + } + + DataFormat ResolveFormat(out BufferRequirements bufferRequirements, DataFormat? formatPreference = null) + { + // First try to check for preferred support. + switch (formatPreference) + { + case DataFormat.Binary when _canBinaryConvert: + bufferRequirements = _binaryBufferRequirements; + return DataFormat.Binary; + case DataFormat.Text when _canTextConvert: + bufferRequirements = _textBufferRequirements; + return DataFormat.Text; + default: + // The common case, no preference given (or no match) means we default to binary if supported. + if (_canBinaryConvert) + { + bufferRequirements = _binaryBufferRequirements; + return DataFormat.Binary; + } + + if (Converter.CanConvert(DataFormat.Text, out bufferRequirements)) + { + bufferRequirements = _textBufferRequirements; + return DataFormat.Text; + } + + ThrowHelper.ThrowInvalidOperationException("Converter doesn't support any data format."); + bufferRequirements = default; + return default; + } + } +} + +readonly struct PgFieldBinding +{ + internal PgFieldBinding(DataFormat dataFormat, Size bufferRequirement) + { + DataFormat = dataFormat; + BufferRequirement = bufferRequirement; + } + + public DataFormat DataFormat { get; } + public Size BufferRequirement { get; } +} + +readonly struct PgValueBinding +{ + public DataFormat DataFormat { get; } + public Size BufferRequirement { get; } + public Size? Size { get; } + public object? WriteState { get; } + + internal PgValueBinding(DataFormat dataFormat, Size bufferRequirement, Size? size, object? 
writeState) + { + DataFormat = dataFormat; + BufferRequirement = bufferRequirement; + Size = size; + WriteState = writeState; + } + + [MemberNotNullWhen(false, nameof(Size))] + public bool IsDbNullBinding => Size is null; +} diff --git a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs new file mode 100644 index 0000000000..86c96231a0 --- /dev/null +++ b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs @@ -0,0 +1,187 @@ +using System; +using System.Collections; +using System.Collections.Generic; + +namespace Npgsql.Internal; + +struct PgTypeInfoResolverChainBuilder +{ + readonly List<(Type ImplementationType, object)> _factories = []; + Action>? _addRangeResolvers; + Action>? _addMultirangeResolvers; + RangeArrayHandler _rangeArrayHandler = RangeArrayHandler.Instance; + MultirangeArrayHandler _multirangeArrayHandler = MultirangeArrayHandler.Instance; + Action>? _addArrayResolvers; + + public PgTypeInfoResolverChainBuilder() + { + } + + public void Clear() => _factories.Clear(); + + public void AppendResolverFactory(PgTypeInfoResolverFactory factory) + => AddResolverFactory(factory.GetType(), factory); + public void AppendResolverFactory(Func factory) where T : PgTypeInfoResolverFactory + => AddResolverFactory(typeof(T), Memoize(factory)); + + public void PrependResolverFactory(PgTypeInfoResolverFactory factory) + => AddResolverFactory(factory.GetType(), factory, prepend: true); + public void PrependResolverFactory(Func factory) where T : PgTypeInfoResolverFactory + => AddResolverFactory(typeof(T), Memoize(factory), prepend: true); + + // Memoize the caller factory so all our actions (_addArrayResolvers etc.) call into the same instance. + static Func Memoize(Func factory) + { + PgTypeInfoResolverFactory? 
instance = null; + return () => instance ??= factory(); + } + + static PgTypeInfoResolverFactory GetInstance((Type, object Instance) factory) => factory.Instance switch + { + PgTypeInfoResolverFactory f => f, + Func f => f(), + _ => throw new ArgumentOutOfRangeException(nameof(factory), factory, null) + }; + + void AddResolverFactory(Type type, object factory, bool prepend = false) + { + for (var i = 0; i < _factories.Count; i++) + if (_factories[i].ImplementationType == type) + { + _factories.RemoveAt(i); + break; + } + + if (prepend) + _factories.Insert(0, (type, factory)); + else + _factories.Add((type, factory)); + } + + public void EnableRanges() + { + _addRangeResolvers ??= AddResolvers; + _rangeArrayHandler = RangeArrayHandlerImpl.Instance; + + static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) + { + foreach (var factory in instance._factories) + if (GetInstance(factory).CreateRangeResolver() is { } resolver) + resolvers.Add(resolver); + } + } + + public void EnableMultiranges() + { + _addMultirangeResolvers ??= AddResolvers; + _multirangeArrayHandler = MultirangeArrayHandlerImpl.Instance; + + static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) + { + foreach (var factory in instance._factories) + if (GetInstance(factory).CreateMultirangeResolver() is { } resolver) + resolvers.Add(resolver); + } + } + + public void EnableArrays() + { + _addArrayResolvers ??= AddResolvers; + + static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) + { + foreach (var factory in instance._factories) + if (GetInstance(factory).CreateArrayResolver() is { } resolver) + resolvers.Add(resolver); + + if (instance._addRangeResolvers is not null) + foreach (var factory in instance._factories) + if (instance._rangeArrayHandler.CreateRangeArrayResolver(GetInstance(factory)) is { } resolver) + resolvers.Add(resolver); + + if (instance._addMultirangeResolvers is not null) + foreach (var factory in 
instance._factories) + if (instance._multirangeArrayHandler.CreateMultirangeArrayResolver(GetInstance(factory)) is { } resolver) + resolvers.Add(resolver); + } + } + + public PgTypeInfoResolverChain Build(Action>? configure = null) + { + var resolvers = new List(); + foreach (var factory in _factories) + resolvers.Add(GetInstance(factory).CreateResolver()); + var instance = this; + _addRangeResolvers?.Invoke(instance, resolvers); + _addMultirangeResolvers?.Invoke(instance, resolvers); + _addArrayResolvers?.Invoke(instance, resolvers); + + configure?.Invoke(resolvers); + return new( + resolvers, + rangesEnabled: _addRangeResolvers is not null, + multirangesEnabled: _addMultirangeResolvers is not null, + arraysEnabled: _addArrayResolvers is not null + ); + } + + class RangeArrayHandler + { + public static RangeArrayHandler Instance { get; } = new(); + + public virtual IPgTypeInfoResolver? CreateRangeArrayResolver(PgTypeInfoResolverFactory factory) => null; + } + + sealed class RangeArrayHandlerImpl : RangeArrayHandler + { + public new static RangeArrayHandlerImpl Instance { get; } = new(); + + public override IPgTypeInfoResolver? CreateRangeArrayResolver(PgTypeInfoResolverFactory factory) => factory.CreateRangeArrayResolver(); + } + + class MultirangeArrayHandler + { + public static MultirangeArrayHandler Instance { get; } = new(); + + public virtual IPgTypeInfoResolver? CreateMultirangeArrayResolver(PgTypeInfoResolverFactory factory) => null; + } + + sealed class MultirangeArrayHandlerImpl : MultirangeArrayHandler + { + public new static MultirangeArrayHandlerImpl Instance { get; } = new(); + + public override IPgTypeInfoResolver? 
CreateMultirangeArrayResolver(PgTypeInfoResolverFactory factory) => factory.CreateMultirangeArrayResolver(); + } +} + +readonly struct PgTypeInfoResolverChain : IEnumerable +{ + [Flags] + enum EnabledFlags + { + None = 0, + Ranges = 1, + Multiranges = 2, + Arrays = 4 + } + + readonly EnabledFlags _enabled; + readonly List _resolvers; + + public PgTypeInfoResolverChain(List resolvers, bool rangesEnabled, bool multirangesEnabled, bool arraysEnabled) + { + _enabled = rangesEnabled ? EnabledFlags.Ranges | _enabled : _enabled; + _enabled = multirangesEnabled ? EnabledFlags.Multiranges | _enabled : _enabled; + _enabled = arraysEnabled ? EnabledFlags.Arrays | _enabled : _enabled; + _resolvers = resolvers; + } + + public bool RangesEnabled => _enabled.HasFlag(EnabledFlags.Ranges); + public bool MultirangesEnabled => _enabled.HasFlag(EnabledFlags.Multiranges); + public bool ArraysEnabled => _enabled.HasFlag(EnabledFlags.Arrays); + + public IEnumerator GetEnumerator() + => _resolvers?.GetEnumerator() ?? (IEnumerator)Array.Empty().GetEnumerator(); + IEnumerator IEnumerable.GetEnumerator() + => _resolvers?.GetEnumerator() ?? Array.Empty().GetEnumerator(); +} diff --git a/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs b/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..9392e2c840 --- /dev/null +++ b/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs @@ -0,0 +1,16 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgTypeInfoResolverFactory +{ + public abstract IPgTypeInfoResolver CreateResolver(); + public abstract IPgTypeInfoResolver? CreateArrayResolver(); + + public virtual IPgTypeInfoResolver? CreateRangeResolver() => null; + public virtual IPgTypeInfoResolver? CreateRangeArrayResolver() => null; + + public virtual IPgTypeInfoResolver? CreateMultirangeResolver() => null; + public virtual IPgTypeInfoResolver? 
CreateMultirangeArrayResolver() => null; +} diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs new file mode 100644 index 0000000000..fde304f547 --- /dev/null +++ b/src/Npgsql/Internal/PgWriter.cs @@ -0,0 +1,645 @@ +using Npgsql.Internal.Postgres; +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; +using System.Text.Unicode; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +enum FlushMode +{ + None, + Blocking, + NonBlocking +} + +// A streaming alternative to a System.IO.Stream, instead based on the preferable IBufferWriter. +interface IStreamingWriter : IBufferWriter +{ + void Flush(TimeSpan timeout = default); + ValueTask FlushAsync(CancellationToken cancellationToken = default); +} + +sealed class NpgsqlBufferWriter(NpgsqlWriteBuffer buffer) : IStreamingWriter +{ + int? 
_lastBufferSize; + + public void Advance(int count) + { + if (_lastBufferSize < count || buffer.WriteSpaceLeft < count) + ThrowHelper.ThrowInvalidOperationException("Cannot advance past the end of the current buffer."); + _lastBufferSize = null; + buffer.WritePosition += count; + } + + public Memory GetMemory(int sizeHint = 0) + { + var writePosition = buffer.WritePosition; + var bufferSize = buffer.Size - writePosition; + if (sizeHint > bufferSize) + ThrowOutOfMemoryException(); + + _lastBufferSize = bufferSize; + return buffer.Buffer.AsMemory(writePosition, bufferSize); + } + + public Span GetSpan(int sizeHint = 0) + { + var writePosition = buffer.WritePosition; + var bufferSize = buffer.Size - writePosition; + if (sizeHint > bufferSize) + ThrowOutOfMemoryException(); + + _lastBufferSize = bufferSize; + return buffer.Buffer.AsSpan(writePosition, bufferSize); + } + + static void ThrowOutOfMemoryException() => throw new OutOfMemoryException("Not enough space left in buffer."); + + public void Flush(TimeSpan timeout = default) + { + if (timeout == TimeSpan.Zero) + buffer.Flush(); + else + { + TimeSpan? originalTimeout = null; + try + { + if (timeout != TimeSpan.Zero) + { + originalTimeout = buffer.Timeout; + buffer.Timeout = timeout; + } + buffer.Flush(); + } + finally + { + if (originalTimeout is { } value) + buffer.Timeout = value; + } + } + } + + public ValueTask FlushAsync(CancellationToken cancellationToken = default) + => new(buffer.Flush(async: true, cancellationToken)); +} + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public sealed class PgWriter +{ + readonly IBufferWriter _writer; + + byte[]? _buffer; + int _offset; + int _pos; + int _length; + + int _totalBytesWritten; + + ValueMetadata _current; + NpgsqlDatabaseInfo? 
_typeCatalog; + + internal PgWriter(IBufferWriter writer) => _writer = writer; + + internal PgWriter Init(NpgsqlDatabaseInfo typeCatalog, FlushMode flushMode = FlushMode.None) + { + if (_pos != _offset) + ThrowHelper.ThrowInvalidOperationException("Invalid concurrent use or PgWriter was not committed properly, PgWriter still has uncommitted bytes."); + + // Elide write barrier if we can. + if (!ReferenceEquals(_typeCatalog, typeCatalog)) + _typeCatalog = typeCatalog; + + FlushMode = flushMode; + _totalBytesWritten = 0; + RequestBuffer(count: 0); + return this; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + void RequestBuffer(int count) + { + // GetMemory will check whether count is larger than the max buffer size. + var mem = _writer.GetMemory(count); + if (!MemoryMarshal.TryGetArray(mem, out var segment)) + ThrowHelper.ThrowNotSupportedException("Only array backed writers are supported."); + + _buffer = segment.Array!; + _offset = _pos = segment.Offset; + _length = segment.Offset + segment.Count; + } + + internal FlushMode FlushMode { get; private set; } + + internal void RefreshBuffer() => RequestBuffer(count: 0); + + internal PgWriter WithFlushMode(FlushMode mode) + { + FlushMode = mode; + return this; + } + + void Ensure(int count = 1) + { + if (count <= Remaining) + return; + + Slow(count); + + [MethodImpl(MethodImplOptions.NoInlining)] + void Slow(int count) + { + // Try to re-request a larger size. + Commit(); + RequestBuffer(count); + // GetMemory is expected to throw if count is too large for the remaining space. 
+ Debug.Assert(count <= Remaining); + } + } + + Span Span => _buffer.AsSpan(_pos, _length - _pos); + + int Remaining => _length - _pos; + + void Advance(int count) => _pos += count; + + void Commit() + { + var written = _pos - _offset; + _totalBytesWritten += written; + _writer.Advance(written); + _offset = _pos; + } + + internal void CommitAndResetTotal(int expectedByteCount) + { + Commit(); + + var totalBytesWritten = _totalBytesWritten; + _totalBytesWritten = 0; + if (totalBytesWritten != expectedByteCount) + ThrowHelper.ThrowInvalidOperationException($"Bytes written ({totalBytesWritten}) and expected byte count ({expectedByteCount}) don't match."); + } + + internal ValueTask StartWrite(bool async, in PgValueBinding binding, CancellationToken cancellationToken) + { + if (binding.IsDbNullBinding) + ThrowHelper.ThrowArgumentException("Binding context cannot be for a DbNull.", nameof(binding)); + + var bufferRequirement = binding.BufferRequirement; + var size = binding.Size.GetValueOrDefault(); + _current = new ValueMetadata + { + Format = binding.DataFormat, + BufferRequirement = bufferRequirement, + Size = size, + // WriteState is generally null, checking for null and showing the null literal to the JIT allows us to skip the write barrier if so. + WriteState = binding.WriteState is null ? null : binding.WriteState + }; + + return ShouldFlush(BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, size.GetValueOrDefault())) + ? Flush(async, cancellationToken) + : new(); + } + + internal void EndWrite(Size expectedByteCount) + => CommitAndResetTotal(expectedByteCount.GetValueOrDefault()); + + public ValueMetadata Current => _current; + + // This method lives here to remove the chances oids will be cached on converters inadvertently when data type names should be used. + // Such a mapping (for instance for array element oids) should be done per operation to ensure it is done in the context of a specific backend. 
+ public void WriteAsOid(PgTypeId pgTypeId) + { + var oid = _typeCatalog!.GetOid(pgTypeId); + WriteUInt32((uint)oid); + } + + public void WriteByte(byte value) + { + Ensure(sizeof(byte)); + Span[0] = value; + Advance(sizeof(byte)); + } + + public void WriteInt16(short value) + { + Ensure(sizeof(short)); + BinaryPrimitives.WriteInt16BigEndian(Span, value); + Advance(sizeof(short)); + } + + public void WriteInt32(int value) + { + Ensure(sizeof(int)); + BinaryPrimitives.WriteInt32BigEndian(Span, value); + Advance(sizeof(int)); + } + + public void WriteInt64(long value) + { + Ensure(sizeof(long)); + BinaryPrimitives.WriteInt64BigEndian(Span, value); + Advance(sizeof(long)); + } + + public void WriteUInt16(ushort value) + { + Ensure(sizeof(ushort)); + BinaryPrimitives.WriteUInt16BigEndian(Span, value); + Advance(sizeof(ushort)); + } + + public void WriteUInt32(uint value) + { + Ensure(sizeof(uint)); + BinaryPrimitives.WriteUInt32BigEndian(Span, value); + Advance(sizeof(uint)); + } + + public void WriteUInt64(ulong value) + { + Ensure(sizeof(ulong)); + BinaryPrimitives.WriteUInt64BigEndian(Span, value); + Advance(sizeof(ulong)); + } + + public void WriteFloat(float value) + { + Ensure(sizeof(float)); + BinaryPrimitives.WriteSingleBigEndian(Span, value); + Advance(sizeof(float)); + } + + public void WriteDouble(double value) + { + Ensure(sizeof(double)); + BinaryPrimitives.WriteDoubleBigEndian(Span, value); + Advance(sizeof(double)); + } + + public void WriteChars(ReadOnlySpan data, Encoding encoding) + { + if (encoding.CodePage == Encoding.UTF8.CodePage) + { + var fallback = encoding.EncoderFallback; + // We can only emulate these well known fallbacks in the fast path. 
+ if (EncoderFallback.ExceptionFallback.Equals(fallback) || EncoderFallback.ReplacementFallback.Equals(fallback)) + { + Utf8Core(data, replace: !EncoderFallback.ExceptionFallback.Equals(fallback), scalarMaxByteCount: 4); + return; + } + } + + // If we have more chars than bytes remaining we can immediately go to the slow path. + if (data.Length <= Remaining) + { + // If not, it's worth a shot to see if we can convert in one go. + if (!ShouldFlush(encoding.GetMaxByteCount(data.Length)) || !ShouldFlush(encoding.GetByteCount(data))) + { + var count = encoding.GetBytes(data, Span); + Advance(count); + return; + } + } + + Core(data, encoding); + + void Utf8Core(ReadOnlySpan data, bool replace, int scalarMaxByteCount) + { + while (true) + { + var status = Utf8.FromUtf16(data, Span, out var charsRead, out var bytesWritten, replaceInvalidSequences: replace, isFinalBlock: true); + Advance(bytesWritten); + + switch (status) + { + case OperationStatus.DestinationTooSmall: + Flush(); + Ensure(scalarMaxByteCount); + data = data.Slice(charsRead); + break; + case OperationStatus.InvalidData: + ThrowEncoderFallbackException(); + break; + default: + return; + } + } + + static void ThrowEncoderFallbackException() + => throw new EncoderFallbackException("Unable to translate Unicode character to specified code page"); + } + + void Core(ReadOnlySpan data, Encoding encoding) + { + var encoder = encoding.GetEncoder(); + var minBufferSize = encoding.GetMaxByteCount(1); + + bool completed; + do + { + if (ShouldFlush(minBufferSize)) + Flush(); + Ensure(minBufferSize); + encoder.Convert(data, Span, flush: true, out var charsUsed, out var bytesUsed, out completed); + data = data.Slice(charsUsed); + Advance(bytesUsed); + } while (!completed); + } + } + + public ValueTask WriteCharsAsync(ReadOnlyMemory data, Encoding encoding, CancellationToken cancellationToken = default) + { + if (encoding.CodePage == Encoding.UTF8.CodePage) + { + var fallback = encoding.EncoderFallback; + // We can only 
emulate these well known fallbacks in the fast path. + if (EncoderFallback.ExceptionFallback.Equals(fallback) || EncoderFallback.ReplacementFallback.Equals(fallback)) + return Utf8Core(data, replace: !EncoderFallback.ExceptionFallback.Equals(fallback), scalarMaxByteCount: 4, cancellationToken); + } + + var dataSpan = data.Span; + // If we have more chars than bytes remaining we can immediately go to the slow path. + if (data.Length <= Remaining) + { + // If not, it's worth a shot to see if we can convert in one go. + if (!ShouldFlush(encoding.GetMaxByteCount(data.Length)) || !ShouldFlush(encoding.GetByteCount(dataSpan))) + { + var count = encoding.GetBytes(dataSpan, Span); + Advance(count); + return new(); + } + } + + return Core(data, encoding, cancellationToken); + + async ValueTask Utf8Core(ReadOnlyMemory data, bool replace, int scalarMaxByteCount, CancellationToken cancellationToken) + { + while (true) + { + var status = Utf8.FromUtf16(data.Span, Span, out var charsRead, out var bytesWritten, replaceInvalidSequences: replace, + isFinalBlock: true); + Advance(bytesWritten); + + switch (status) + { + case OperationStatus.DestinationTooSmall: + await FlushAsync(cancellationToken).ConfigureAwait(false); + Ensure(scalarMaxByteCount); + data = data.Slice(charsRead); + break; + case OperationStatus.InvalidData: + ThrowEncoderFallbackException(); + break; + default: + return; + } + } + + static void ThrowEncoderFallbackException() + => throw new EncoderFallbackException("Unable to translate Unicode character to specified code page"); + } + + async ValueTask Core(ReadOnlyMemory data, Encoding encoding, CancellationToken cancellationToken) + { + var encoder = encoding.GetEncoder(); + var minBufferSize = encoding.GetMaxByteCount(1); + + bool completed; + do + { + if (ShouldFlush(minBufferSize)) + await FlushAsync(cancellationToken).ConfigureAwait(false); + Ensure(minBufferSize); + encoder.Convert(data.Span, Span, flush: true, out var charsUsed, out var bytesUsed, out 
completed); + data = data.Slice(charsUsed); + Advance(bytesUsed); + } while (!completed); + } + } + + public void WriteBytes(ReadOnlySpan buffer) + => WriteBytes(allowMixedIO: false, buffer); + + internal void WriteBytes(bool allowMixedIO, ReadOnlySpan buffer) + { + while (!buffer.IsEmpty) + { + if (Remaining is 0) + Flush(allowWhenNonBlocking: allowMixedIO); + var write = Math.Min(buffer.Length, Remaining); + buffer.Slice(0, write).CopyTo(Span); + Advance(write); + buffer = buffer.Slice(write); + } + } + + public ValueTask WriteBytesAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + => WriteBytesAsync(allowMixedIO: false, buffer, cancellationToken); + + internal ValueTask WriteBytesAsync(bool allowMixedIO, ReadOnlyMemory buffer, CancellationToken cancellationToken) + { + if (buffer.Length <= Remaining) + { + buffer.Span.CopyTo(Span); + Advance(buffer.Length); + return new(); + } + + return Core(allowMixedIO, buffer, cancellationToken); + + async ValueTask Core(bool allowMixedIO, ReadOnlyMemory buffer, CancellationToken cancellationToken) + { + while (!buffer.IsEmpty) + { + if (Remaining is 0) + await FlushAsync(allowWhenBlocking: allowMixedIO, cancellationToken).ConfigureAwait(false); + var write = Math.Min(buffer.Length, Remaining); + buffer.Span.Slice(0, write).CopyTo(Span); + Advance(write); + buffer = buffer.Slice(write); + } + } + } + + /// + /// Gets a that can be used to write to the underlying buffer. + /// + /// Blocking flushes during writes that were expected to be non-blocking and vice versa cause an exception to be thrown unless allowMixedIO is set to true, false by default. + /// The stream. 
+ public Stream GetStream(bool allowMixedIO = false) + => new PgWriterStream(this, allowMixedIO); + + public bool ShouldFlush(int byteCount) => Remaining < byteCount && FlushMode is not FlushMode.None; + + public void Flush(TimeSpan timeout = default) + => Flush(allowWhenNonBlocking: false, timeout); + + void Flush(bool allowWhenNonBlocking, TimeSpan timeout = default) + { + switch (FlushMode) + { + case FlushMode.None: + return; + case FlushMode.NonBlocking when !allowWhenNonBlocking: + throw new NotSupportedException($"Cannot call {nameof(Flush)} on a non-blocking {nameof(PgWriter)}, call FlushAsync instead."); + } + + if (_writer is not IStreamingWriter writer) + throw new NotSupportedException($"Cannot call {nameof(Flush)} on a buffered {nameof(PgWriter)}, {nameof(FlushMode)}.{nameof(FlushMode.None)} should be used to prevent this."); + + Commit(); + writer.Flush(timeout); + RequestBuffer(count: 0); + } + + public ValueTask FlushAsync(CancellationToken cancellationToken = default) + => FlushAsync(allowWhenBlocking: false, cancellationToken); + + async ValueTask FlushAsync(bool allowWhenBlocking, CancellationToken cancellationToken = default) + { + switch (FlushMode) + { + case FlushMode.None: + return; + case FlushMode.Blocking when !allowWhenBlocking: + throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a blocking {nameof(PgWriter)}, call Flush instead."); + } + + if (_writer is not IStreamingWriter writer) + throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a buffered {nameof(PgWriter)}, {nameof(FlushMode)}.{nameof(FlushMode.None)} should be used to prevent this."); + + Commit(); + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + RequestBuffer(count: 0); + } + + internal ValueTask Flush(bool async, CancellationToken cancellationToken = default) + { + if (async) + return FlushAsync(cancellationToken); + + Flush(); + return new(); + } + + internal ValueTask BeginNestedWrite(bool async, Size 
bufferRequirement, int byteCount, object? state, CancellationToken cancellationToken) + { + Debug.Assert(bufferRequirement != -1); + + var bufferRequirementByteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, byteCount); + _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; + + if (ShouldFlush(bufferRequirementByteCount)) + return Core(async, cancellationToken); + + return new(new NestedWriteScope()); + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask Core(bool async, CancellationToken cancellationToken) + { + await Flush(async, cancellationToken).ConfigureAwait(false); + return new(); + } + } + + public NestedWriteScope BeginNestedWrite(Size bufferRequirement, int byteCount, object? state) + => BeginNestedWrite(async: false, bufferRequirement, byteCount, state, CancellationToken.None).GetAwaiter().GetResult(); + + public ValueTask BeginNestedWriteAsync(Size bufferRequirement, int byteCount, object? 
state, CancellationToken cancellationToken = default) + => BeginNestedWrite(async: true, bufferRequirement, byteCount, state, cancellationToken); + + sealed class PgWriterStream : Stream + { + readonly PgWriter _writer; + readonly bool _allowMixedIO; + + internal PgWriterStream(PgWriter writer, bool allowMixedIO) + { + _writer = writer; + _allowMixedIO = allowMixedIO; + } + + public override void Write(byte[] buffer, int offset, int count) + => Write(async: false, buffer: buffer, offset: offset, count: count, CancellationToken.None).GetAwaiter().GetResult(); + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => Write(async: true, buffer: buffer, offset: offset, count: count, cancellationToken: cancellationToken); + + Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); + if (buffer.Length - offset < count) + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + + if (async) + { + if (cancellationToken.IsCancellationRequested) + return Task.FromCanceled(cancellationToken); + + return _writer.WriteBytesAsync(_allowMixedIO, buffer.AsMemory(offset, count), cancellationToken).AsTask(); + } + + _writer.WriteBytes(_allowMixedIO, new Span(buffer, offset, count)); + return Task.CompletedTask; + } + + public override void Write(ReadOnlySpan buffer) => _writer.WriteBytes(_allowMixedIO, buffer); + + public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + { + if (cancellationToken.IsCancellationRequested) + return new(Task.FromCanceled(cancellationToken)); + + return _writer.WriteBytesAsync(buffer, cancellationToken); + } + + public 
override void Flush() + => _writer.Flush(); + + public override Task FlushAsync(CancellationToken cancellationToken) + => _writer.FlushAsync(cancellationToken).AsTask(); + + public override bool CanRead => false; + public override bool CanWrite => true; + public override bool CanSeek => false; + + public override int Read(byte[] buffer, int offset, int count) + => throw new NotSupportedException(); + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => throw new NotSupportedException(); + + public override long Length => throw new NotSupportedException(); + public override void SetLength(long value) + => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + public override long Seek(long offset, SeekOrigin origin) + => throw new NotSupportedException(); + } +} + +// No-op for now. +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public struct NestedWriteScope : IDisposable +{ + public void Dispose() + { + } +} diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs new file mode 100644 index 0000000000..8dd91b5508 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -0,0 +1,252 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal.Postgres; + +/// +/// Represents the normalized name of a PostgreSQL data type. +/// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +[DebuggerDisplay("{DisplayName,nq}")] +public readonly struct DataTypeName : IEquatable +{ + const char InvalidIdentifier = '-'; + + /// + /// The maximum length of names in an unmodified PostgreSQL installation. + /// + /// + /// We need to respect this to get to valid names when deriving them (for multirange/arrays etc). + /// This does not include the namespace. 
+ /// + internal const int NAMEDATALEN = 64 - 1; // Minus null terminator. + + readonly string _value; + + DataTypeName(string fullyQualifiedDataTypeName, bool validated) + { + if (!validated) + { + var schemaEndIndex = fullyQualifiedDataTypeName.IndexOf('.'); + if (schemaEndIndex is -1 or 0) + throw new ArgumentException("Given value does not contain a schema.", nameof(fullyQualifiedDataTypeName)); + + // Friendly array syntax is the only fully qualified name quirk that's allowed by postgres (see FromDisplayName). + if (fullyQualifiedDataTypeName.AsSpan(schemaEndIndex).EndsWith("[]".AsSpan())) + fullyQualifiedDataTypeName = NormalizeName(fullyQualifiedDataTypeName); + + var typeNameLength = fullyQualifiedDataTypeName.Length - (schemaEndIndex + 1); + if (typeNameLength > NAMEDATALEN) + throw new ArgumentException( + $"Name is too long and would be truncated to: {fullyQualifiedDataTypeName.Substring(0, + fullyQualifiedDataTypeName.Length - typeNameLength + NAMEDATALEN)}"); + } + + _value = fullyQualifiedDataTypeName; + } + + public DataTypeName(string fullyQualifiedDataTypeName) + : this(fullyQualifiedDataTypeName, validated: false) { } + + internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) + => new(fullyQualifiedDataTypeName, validated: true); + + bool IsUnqualifiedDisplayName => SchemaSpan is "pg_catalog" || IsUnqualified; + + // Includes schema unless it's pg_catalog or the schema is an invalid character used to represent an unspecified schema. + public string DisplayName => + IsUnqualifiedDisplayName + ? UnqualifiedDisplayName + : Schema + "." 
+ UnqualifiedDisplayName; + + public string UnqualifiedDisplayName => ToDisplayName(UnqualifiedNameSpan, mapAliases: IsUnqualifiedDisplayName); + + internal ReadOnlySpan SchemaSpan => Value.AsSpan(0, _value.IndexOf('.')); + public string Schema => Value.Substring(0, _value.IndexOf('.')); + internal ReadOnlySpan UnqualifiedNameSpan => Value.AsSpan(_value.IndexOf('.') + 1); + public string UnqualifiedName => Value.Substring(_value.IndexOf('.') + 1); + public string Value => _value is null ? ThrowDefaultException() : _value; + + static string ThrowDefaultException() => + throw new InvalidOperationException($"This operation cannot be performed on a default value of {nameof(DataTypeName)}."); + + public static implicit operator string(DataTypeName value) => value.Value; + + // This contains two invalid sql identifiers (schema and name are both separate identifiers, and would both have to be quoted to be valid). + // Given this is an invalid name it's fine for us to represent a fully qualified 'unspecified' name with it. + static string UnspecifiedName => $"{InvalidIdentifier}.{InvalidIdentifier}"; + public static DataTypeName Unspecified => ValidatedName(UnspecifiedName); + + public static string GetUnqualifiedName(string dataTypeName) + => dataTypeName.IndexOf('.') is not -1 and var index + ? dataTypeName.Substring(index + 1) : dataTypeName; + + public bool IsUnqualified => Value.StartsWith(InvalidIdentifier) && Value != UnspecifiedName; + + public bool IsArray => UnqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal); + + internal static DataTypeName CreateFullyQualifiedName(string dataTypeName) + => dataTypeName.IndexOf('.') != -1 ? new(dataTypeName) : new("-." + dataTypeName); + + // Static transform as defined by https://www.postgresql.org/docs/current/sql-createtype.html#SQL-CREATETYPE-ARRAY + // We don't have to deal with [] as we're always starting from a normalized fully qualified name. 
+ public DataTypeName ToArrayName() + { + var unqualifiedNameSpan = UnqualifiedNameSpan; + if (unqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal)) + return this; + + if (unqualifiedNameSpan.Length + "_".Length > NAMEDATALEN) + unqualifiedNameSpan = unqualifiedNameSpan.Slice(0, NAMEDATALEN - "_".Length); + + return new(string.Concat(Schema, "._", unqualifiedNameSpan)); + } + + // Static transform as defined by https://www.postgresql.org/docs/current/sql-createtype.html#SQL-CREATETYPE-RANGE + // Manual testing on PG confirmed it's only the first occurence of 'range' that gets replaced. + public DataTypeName ToDefaultMultirangeName() + { + var nameSpan = UnqualifiedNameSpan; + if (nameSpan.IndexOf("multirange".AsSpan(), StringComparison.Ordinal) is not -1) + return this; + + if (nameSpan.IndexOf("range", StringComparison.Ordinal) is var rangeIndex and not -1) + { + nameSpan = string.Concat(nameSpan.Slice(0, rangeIndex), "multirange", nameSpan.Slice(rangeIndex + "range".Length)); + return new(string.Concat(SchemaSpan, ".", + nameSpan.Length > NAMEDATALEN ? nameSpan.Slice(0, NAMEDATALEN) : nameSpan)); + } + + if (nameSpan.Length + "_multirange".Length > NAMEDATALEN) + nameSpan = nameSpan.Slice(0, NAMEDATALEN - "_multirange".Length); + + return new(string.Concat(SchemaSpan, ".", nameSpan, "_multirange")); + } + + // Create a DataTypeName from a broader range of valid names. + // including SQL aliases like 'timestamp without time zone', trailing facet info etc. 
+ public static DataTypeName FromDisplayName(string displayName) + { + var displayNameSpan = displayName.AsSpan().Trim(); + + var schemaEndIndex = displayNameSpan.IndexOf('.'); + ReadOnlySpan schemaSpan; + if (schemaEndIndex is not -1) + { + schemaSpan = displayNameSpan.Slice(0, schemaEndIndex); + displayNameSpan = displayNameSpan.Slice(schemaEndIndex + 1); + } + else + { + schemaSpan = $"{InvalidIdentifier}"; + } + + // Then we strip either of the two valid array representations to get the base type name (with or without facets). + var isArray = false; + if (displayNameSpan.StartsWith("_", StringComparison.Ordinal)) + { + isArray = true; + displayNameSpan = displayNameSpan.Slice(1); + } + else if (displayNameSpan.EndsWith("[]", StringComparison.Ordinal)) + { + isArray = true; + displayNameSpan = displayNameSpan.Slice(0, displayNameSpan.Length - 2); + } + + if (schemaEndIndex is not -1) + { + // If we have a schema we're done, Postgres doesn't do display name conversions on fully qualified names. + // There is one exception and that's array syntax, which is always resolvable in both ways, while we want the canonical name. + return !isArray + ? new(displayName.Length == schemaEndIndex + displayNameSpan.Length + ? displayName + : string.Concat(schemaSpan, ".", displayNameSpan)) + : new(string.Concat(schemaSpan, ".", "_", displayNameSpan)); + } + + // Finally we strip the facet info. + var parenIndex = displayNameSpan.IndexOf('('); + if (parenIndex > -1) + displayNameSpan = displayNameSpan.Slice(0, parenIndex); + + // Map any aliases to the internal type name. 
+ var mapped = displayNameSpan switch + { + "boolean" => "bool", + "character" => "bpchar", + "decimal" => "numeric", + "real" => "float4", + "double precision" => "float8", + "smallint" => "int2", + "integer" => "int4", + "bigint" => "int8", + "time without time zone" => "time", + "timestamp without time zone" => "timestamp", + "time with time zone" => "timetz", + "timestamp with time zone" => "timestamptz", + "bit varying" => "varbit", + "character varying" => "varchar", + var value => value + }; + + if (DataTypeNames.IsWellKnownUnqualifiedName(mapped)) + schemaSpan = "pg_catalog".AsSpan(); + + return new(string.Concat(schemaSpan, ".", isArray ? "_" : "", mapped)); + } + + // The type names stored in a DataTypeName are usually the actual typname from the pg_type column. + // There are some canonical aliases defined in the SQL standard which we take into account. + // Additionally array types have a '_' prefix while for readability their element type should be postfixed with '[]'. + // See the table for all the aliases https://www.postgresql.org/docs/current/static/datatype.html#DATATYPE-TABLE + // Alternatively some of the source lives at https://github.com/postgres/postgres/blob/c8e1ba736b2b9e8c98d37a5b77c4ed31baf94147/src/backend/utils/adt/format_type.c#L186 + static string ToDisplayName(ReadOnlySpan unqualifiedName, bool mapAliases) + { + var isArray = unqualifiedName.IndexOf('_') is 0; + var baseTypeName = isArray ? unqualifiedName.Slice(1) : unqualifiedName; + + string? 
mappedBaseType = null; + if (mapAliases) + { + mappedBaseType = baseTypeName switch + { + "bool" => "boolean", + "bpchar" => "character", + "decimal" => "numeric", + "float4" => "real", + "float8" => "double precision", + "int2" => "smallint", + "int4" => "integer", + "int8" => "bigint", + "time" => "time without time zone", + "timestamp" => "timestamp without time zone", + "timetz" => "time with time zone", + "timestamptz" => "timestamp with time zone", + "varbit" => "bit varying", + "varchar" => "character varying", + _ => null + }; + } + + return isArray + ? string.Concat(mappedBaseType ?? baseTypeName, "[]") + : mappedBaseType ?? baseTypeName.ToString(); + } + + internal static bool IsFullyQualified(ReadOnlySpan dataTypeName) => dataTypeName.Contains(".".AsSpan(), StringComparison.Ordinal); + + internal static string NormalizeName(string dataTypeName) + { + var fqName = FromDisplayName(dataTypeName); + return IsFullyQualified(dataTypeName.AsSpan()) ? fqName.Value : fqName.UnqualifiedName; + } + + public override string ToString() => Value; + public bool Equals(DataTypeName other) => string.Equals(_value, other._value); + public override bool Equals(object? obj) => obj is DataTypeName other && Equals(other); + public override int GetHashCode() => _value.GetHashCode(); + public static bool operator ==(DataTypeName left, DataTypeName right) => left.Equals(right); + public static bool operator !=(DataTypeName left, DataTypeName right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/Postgres/DataTypeNames.cs b/src/Npgsql/Internal/Postgres/DataTypeNames.cs new file mode 100644 index 0000000000..d904c8ae33 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/DataTypeNames.cs @@ -0,0 +1,110 @@ +using System; +using static Npgsql.Internal.Postgres.DataTypeName; + +namespace Npgsql.Internal.Postgres; + +/// +/// Well-known PostgreSQL data type names. 
+/// +static class DataTypeNames +{ + // Generated from the following query: + // SELECT '"' || string_agg(typname, '" or "') || '"' FROM ( + // SELECT typname FROM pg_catalog.pg_type WHERE typtype = 'b' AND typcategory <> 'A' + // AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'pg_catalog') ORDER BY typname); + public static bool IsWellKnownUnqualifiedName(ReadOnlySpan name) => name switch + { + "aclitem" or "bit" or "bool" or "box" or "bpchar" or "bytea" or "char" or "cid" or + "cidr" or "circle" or "date" or "float4" or "float8" or "gtsvector" or "inet" or + "int2" or "int4" or "int8" or "interval" or "json" or "jsonb" or "jsonpath" or + "line" or "lseg" or "macaddr" or "macaddr8" or "money" or "name" or "numeric" or + "oid" or "path" or "pg_brin_bloom_summary" or "pg_brin_minmax_multi_summary" or + "pg_dependencies" or "pg_lsn" or "pg_mcv_list" or "pg_ndistinct" or "pg_node_tree" or + "pg_snapshot" or "point" or "polygon" or "refcursor" or "regclass" or "regcollation" or + "regconfig" or "regdictionary" or "regnamespace" or "regoper" or "regoperator" or + "regproc" or "regprocedure" or "regrole" or "regtype" or "text" or "tid" or "time" or + "timestamp" or "timestamptz" or "timetz" or "tsquery" or "tsvector" or "txid_snapshot" or + "uuid" or "varbit" or "varchar" or "xid" or "xid8" or "xml" + => true, + _ => false + }; + + // Note: The names are fully qualified in source so the strings are constants and instances will be interned after the first call. + // Uses an internal constructor bypassing the public DataTypeName constructor validation, as we don't want to store all these names on + // fields either. 
+ public static DataTypeName Int2 => ValidatedName("pg_catalog.int2"); + public static DataTypeName Int4 => ValidatedName("pg_catalog.int4"); + public static DataTypeName Int4Range => ValidatedName("pg_catalog.int4range"); + public static DataTypeName Int4Multirange => ValidatedName("pg_catalog.int4multirange"); + public static DataTypeName Int8 => ValidatedName("pg_catalog.int8"); + public static DataTypeName Int8Range => ValidatedName("pg_catalog.int8range"); + public static DataTypeName Int8Multirange => ValidatedName("pg_catalog.int8multirange"); + public static DataTypeName Float4 => ValidatedName("pg_catalog.float4"); + public static DataTypeName Float8 => ValidatedName("pg_catalog.float8"); + public static DataTypeName Numeric => ValidatedName("pg_catalog.numeric"); + public static DataTypeName NumRange => ValidatedName("pg_catalog.numrange"); + public static DataTypeName NumMultirange => ValidatedName("pg_catalog.nummultirange"); + public static DataTypeName Money => ValidatedName("pg_catalog.money"); + public static DataTypeName Bool => ValidatedName("pg_catalog.bool"); + public static DataTypeName Box => ValidatedName("pg_catalog.box"); + public static DataTypeName Circle => ValidatedName("pg_catalog.circle"); + public static DataTypeName Line => ValidatedName("pg_catalog.line"); + public static DataTypeName LSeg => ValidatedName("pg_catalog.lseg"); + public static DataTypeName Path => ValidatedName("pg_catalog.path"); + public static DataTypeName Point => ValidatedName("pg_catalog.point"); + public static DataTypeName Polygon => ValidatedName("pg_catalog.polygon"); + public static DataTypeName Bpchar => ValidatedName("pg_catalog.bpchar"); + public static DataTypeName Text => ValidatedName("pg_catalog.text"); + public static DataTypeName Varchar => ValidatedName("pg_catalog.varchar"); + public static DataTypeName Char => ValidatedName("pg_catalog.char"); + public static DataTypeName Name => ValidatedName("pg_catalog.name"); + public static DataTypeName 
Bytea => ValidatedName("pg_catalog.bytea"); + public static DataTypeName Date => ValidatedName("pg_catalog.date"); + public static DataTypeName DateRange => ValidatedName("pg_catalog.daterange"); + public static DataTypeName DateMultirange => ValidatedName("pg_catalog.datemultirange"); + public static DataTypeName Time => ValidatedName("pg_catalog.time"); + public static DataTypeName Timestamp => ValidatedName("pg_catalog.timestamp"); + public static DataTypeName TsRange => ValidatedName("pg_catalog.tsrange"); + public static DataTypeName TsMultirange => ValidatedName("pg_catalog.tsmultirange"); + public static DataTypeName TimestampTz => ValidatedName("pg_catalog.timestamptz"); + public static DataTypeName TsTzRange => ValidatedName("pg_catalog.tstzrange"); + public static DataTypeName TsTzMultirange => ValidatedName("pg_catalog.tstzmultirange"); + public static DataTypeName Interval => ValidatedName("pg_catalog.interval"); + public static DataTypeName TimeTz => ValidatedName("pg_catalog.timetz"); + public static DataTypeName Inet => ValidatedName("pg_catalog.inet"); + public static DataTypeName Cidr => ValidatedName("pg_catalog.cidr"); + public static DataTypeName MacAddr => ValidatedName("pg_catalog.macaddr"); + public static DataTypeName MacAddr8 => ValidatedName("pg_catalog.macaddr8"); + public static DataTypeName Bit => ValidatedName("pg_catalog.bit"); + public static DataTypeName Varbit => ValidatedName("pg_catalog.varbit"); + public static DataTypeName TsVector => ValidatedName("pg_catalog.tsvector"); + public static DataTypeName TsQuery => ValidatedName("pg_catalog.tsquery"); + public static DataTypeName RegClass => ValidatedName("pg_catalog.regclass"); + public static DataTypeName RegCollation => ValidatedName("pg_catalog.regcollation"); + public static DataTypeName RegConfig => ValidatedName("pg_catalog.regconfig"); + public static DataTypeName RegDictionary => ValidatedName("pg_catalog.regdictionary"); + public static DataTypeName RegNamespace => 
ValidatedName("pg_catalog.regnamespace"); + public static DataTypeName RegOper => ValidatedName("pg_catalog.regoper"); + public static DataTypeName RegOperator => ValidatedName("pg_catalog.regoperator"); + public static DataTypeName RegProc => ValidatedName("pg_catalog.regproc"); + public static DataTypeName RegProcedure => ValidatedName("pg_catalog.regprocedure"); + public static DataTypeName RegRole => ValidatedName("pg_catalog.regrole"); + public static DataTypeName Uuid => ValidatedName("pg_catalog.uuid"); + public static DataTypeName Xml => ValidatedName("pg_catalog.xml"); + public static DataTypeName Json => ValidatedName("pg_catalog.json"); + public static DataTypeName Jsonb => ValidatedName("pg_catalog.jsonb"); + public static DataTypeName Jsonpath => ValidatedName("pg_catalog.jsonpath"); + public static DataTypeName Record => ValidatedName("pg_catalog.record"); + public static DataTypeName RefCursor => ValidatedName("pg_catalog.refcursor"); + public static DataTypeName OidVector => ValidatedName("pg_catalog.oidvector"); + public static DataTypeName Int2Vector => ValidatedName("pg_catalog.int2vector"); + public static DataTypeName Oid => ValidatedName("pg_catalog.oid"); + public static DataTypeName Xid => ValidatedName("pg_catalog.xid"); + public static DataTypeName Xid8 => ValidatedName("pg_catalog.xid8"); + public static DataTypeName Cid => ValidatedName("pg_catalog.cid"); + public static DataTypeName RegType => ValidatedName("pg_catalog.regtype"); + public static DataTypeName Tid => ValidatedName("pg_catalog.tid"); + public static DataTypeName PgLsn => ValidatedName("pg_catalog.pg_lsn"); + public static DataTypeName Unknown => ValidatedName("pg_catalog.unknown"); + public static DataTypeName Void => ValidatedName("pg_catalog.void"); +} diff --git a/src/Npgsql/Internal/Postgres/Field.cs b/src/Npgsql/Internal/Postgres/Field.cs new file mode 100644 index 0000000000..bae177bd75 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/Field.cs @@ -0,0 +1,14 @@ +using 
System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal.Postgres; + +/// Base field type shared between tables and composites. +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public readonly struct Field(string name, PgTypeId pgTypeId, int typeModifier) +{ + public string Name { get; init; } = name; + public PgTypeId PgTypeId { get; init; } = pgTypeId; + public int TypeModifier { get; init; } = typeModifier; + + public static Field CreateUnspecified(PgTypeId pgTypeId) => new("?", pgTypeId, -1); +} diff --git a/src/Npgsql/Internal/Postgres/Oid.cs b/src/Npgsql/Internal/Postgres/Oid.cs new file mode 100644 index 0000000000..8c01e65ff7 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/Oid.cs @@ -0,0 +1,20 @@ +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal.Postgres; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public readonly struct Oid(uint value) : IEquatable +{ + public static explicit operator uint(Oid oid) => oid.Value; + public static implicit operator Oid(uint oid) => new(oid); + public uint Value { get; init; } = value; + public static Oid Unspecified => new(0); + + public override string ToString() => Value.ToString(); + public bool Equals(Oid other) => Value == other.Value; + public override bool Equals(object? obj) => obj is Oid other && Equals(other); + public override int GetHashCode() => (int)Value; + public static bool operator ==(Oid left, Oid right) => left.Equals(right); + public static bool operator !=(Oid left, Oid right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/Postgres/PgTypeId.cs b/src/Npgsql/Internal/Postgres/PgTypeId.cs new file mode 100644 index 0000000000..ee5ffb9d41 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/PgTypeId.cs @@ -0,0 +1,48 @@ +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal.Postgres; + +/// +/// A discriminated union of and . 
+/// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public readonly struct PgTypeId: IEquatable +{ + readonly DataTypeName _dataTypeName; + readonly Oid _oid; + + public PgTypeId(DataTypeName name) => _dataTypeName = name; + public PgTypeId(Oid oid) => _oid = oid; + + [MemberNotNullWhen(true, nameof(_dataTypeName))] + public bool IsDataTypeName => _dataTypeName != default; + public bool IsOid => _dataTypeName == default; + + public DataTypeName DataTypeName + => IsDataTypeName ? _dataTypeName : throw new InvalidOperationException("This value does not describe a DataTypeName."); + + public Oid Oid + => IsOid ? _oid : throw new InvalidOperationException("This value does not describe an Oid."); + + public static implicit operator PgTypeId(DataTypeName name) => new(name); + public static implicit operator PgTypeId(Oid id) => new(id); + + public override string ToString() => IsOid ? "OID " + _oid : "DataTypeName " + _dataTypeName.Value; + + public bool Equals(PgTypeId other) + { + if (IsOid && other.IsOid) + return _oid == other._oid; + if (IsDataTypeName && other.IsDataTypeName) + return _dataTypeName.Equals(other._dataTypeName); + return false; + } + + public override bool Equals(object? obj) => obj is PgTypeId other && Equals(other); + public override int GetHashCode() => IsOid ? 
_oid.GetHashCode() : _dataTypeName.GetHashCode(); + public static bool operator ==(PgTypeId left, PgTypeId right) => left.Equals(right); + public static bool operator !=(PgTypeId left, PgTypeId right) => !left.Equals(right); + + internal bool IsUnspecified => IsOid && _oid == Oid.Unspecified || _dataTypeName == DataTypeName.Unspecified; +} diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs new file mode 100644 index 0000000000..8f0cf46bcf --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -0,0 +1,249 @@ +using System; +using System.Collections.Generic; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Util; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.Internal.ResolverFactories; + +sealed partial class AdoTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateMultirangeResolver() => new MultirangeResolver(); + public override IPgTypeInfoResolver CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); + + class MultirangeResolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => options.DatabaseInfo.SupportsMultirangeTypes ? 
Mappings.Find(type, dataTypeName, options) : null; + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // int4multirange + mappings.AddType[]>(DataTypeNames.Int4Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.Int4Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options))); + + // int8multirange + mappings.AddType[]>(DataTypeNames.Int8Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.Int8Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + + // nummultirange + mappings.AddType[]>(DataTypeNames.NumMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.NumMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options))); + + // tsmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddType[]>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), + options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + 
mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), + options), options))); + } + else + { + mappings.AddProviderType[]>(DataTypeNames.TsMultirange, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider[], NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), + isDefault: true); + mappings.AddProviderType>>(DataTypeNames.TsMultirange, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider>, NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), requiresDataTypeName)); + } + + mappings.AddType[]>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mappings.AddType>>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + + // tstzmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), + options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + 
CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), + options), options))); + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), + options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), + options))); + } + else + { + mappings.AddProviderType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider[], NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), + isDefault: true); + mappings.AddProviderType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider>, NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), requiresDataTypeName)); + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new 
DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options))); + } + + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + + // datemultirange + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + + return mappings; + } + } + + sealed class MultirangeArrayResolver : MultirangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => options.DatabaseInfo.SupportsMultirangeTypes ? Mappings.Find(type, dataTypeName, options) : null; + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // int4multirange + mappings.AddArrayType[]>(DataTypeNames.Int4Multirange); + mappings.AddArrayType>>(DataTypeNames.Int4Multirange); + + // int8multirange + mappings.AddArrayType[]>(DataTypeNames.Int8Multirange); + mappings.AddArrayType>>(DataTypeNames.Int8Multirange); + + // nummultirange + mappings.AddArrayType[]>(DataTypeNames.NumMultirange); + mappings.AddArrayType>>(DataTypeNames.NumMultirange); + + // tsmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddArrayType>>(DataTypeNames.TsMultirange); + } + else + { + mappings.AddProviderArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddProviderArrayType>>(DataTypeNames.TsMultirange); + } + + mappings.AddArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddArrayType>>(DataTypeNames.TsMultirange); + + // tstzmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + } + else + { + mappings.AddProviderArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddProviderArrayType>>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + } + + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + + // datemultirange + mappings.AddArrayType[]>(DataTypeNames.DateMultirange); + mappings.AddArrayType>>(DataTypeNames.DateMultirange); + mappings.AddArrayType[]>(DataTypeNames.DateMultirange); + 
mappings.AddArrayType>>(DataTypeNames.DateMultirange); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs new file mode 100644 index 0000000000..b145097881 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -0,0 +1,149 @@ +using System; +using System.Numerics; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Util; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.Internal.ResolverFactories; + +sealed partial class AdoTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateRangeResolver() => new RangeResolver(); + public override IPgTypeInfoResolver CreateRangeArrayResolver() => new RangeArrayResolver(); + + class RangeResolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // numeric ranges + mappings.AddStructType>(DataTypeNames.Int4Range, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.Int8Range, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.NumRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new DecimalNumericConverter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.NumRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new BigIntegerNumericConverter(), options))); + + // tsrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType>(DataTypeNames.TsRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options)), + isDefault: true); + } + else + { + mappings.AddProviderStructType>(DataTypeNames.TsRange, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateRangeProvider(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzRange), + options.GetCanonicalTypeId(DataTypeNames.TsRange), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), + isDefault: true); + } + mappings.AddStructType>(DataTypeNames.TsRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + + // tstzrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => 
mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + } + else + { + mappings.AddProviderStructType>(DataTypeNames.TsTzRange, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateRangeProvider(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzRange), + options.GetCanonicalTypeId(DataTypeNames.TsRange), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), + isDefault: true); + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + } + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + + // daterange + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options))); + + return mappings; + } + } + + sealed class RangeArrayResolver : RangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // numeric ranges + mappings.AddStructArrayType>(DataTypeNames.Int4Range); + mappings.AddStructArrayType>(DataTypeNames.Int8Range); + mappings.AddStructArrayType>(DataTypeNames.NumRange); + mappings.AddStructArrayType>(DataTypeNames.NumRange); + + // tsrange + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType>(DataTypeNames.TsRange); + else + mappings.AddProviderStructArrayType>(DataTypeNames.TsRange); + mappings.AddStructArrayType>(DataTypeNames.TsRange); + + // tstzrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + } + else + { + mappings.AddProviderStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + } + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + + // daterange + mappings.AddStructArrayType>(DataTypeNames.DateRange); + mappings.AddStructArrayType>(DataTypeNames.DateRange); + mappings.AddStructArrayType>(DataTypeNames.DateRange); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..46ee4e40b8 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -0,0 +1,567 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.Specialized; +using System.IO; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Converters.Internal; +using Npgsql.Internal.Postgres; +using 
Npgsql.PostgresTypes; +using Npgsql.Util; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed partial class AdoTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + Resolver ResolverInstance { get; } = new(); + + public static AdoTypeInfoResolverFactory Instance { get; } = new(); + + public override IPgTypeInfoResolver CreateResolver() => ResolverInstance; + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + // Baseline types that are always supported. + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var info = Mappings.Find(type, dataTypeName, options); + if (info is null && dataTypeName is not null) + info = GetEnumTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + if (info is null && type is not null && dataTypeName is not null) + info = GetStreamTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + + return info; + } + + static PgTypeInfo? GetStreamTypeInfo(Type type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type != typeof(Stream)) + return null; + + return new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), dataTypeName) { SupportsWriting = false }; + } + + static PgTypeInfo? GetEnumTypeInfo(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is not null && type != typeof(object) && type != typeof(string) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) + return null; + + return new PgConcreteTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), dataTypeName, requestedType: type); + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // Bool + mappings.AddStructType(DataTypeNames.Bool, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolConverter()), isDefault: true); + + // Numeric + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + // Clr byte/sbyte maps to 'int2' as there is no byte type in PostgreSQL. + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Float4, + static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Float8, + static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Money, + static (options, mapping, _) => 
mapping.CreateInfo(options, new MoneyConverter()), MatchRequirement.DataTypeName); + + // Text + // Update PgSerializerOptions.IsWellKnownTextType(Type) after any changes to this list. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + mappings.AddStructType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + // Uses the bytea converters, as neither type has a header. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter(supportsTextFormat: true)), + MatchRequirement.DataTypeName); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter(supportsTextFormat: true)), + MatchRequirement.DataTypeName); + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + //Special mappings, these have no corresponding array mapping. 
+ mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + // Uses the bytea converters, as neither type has a header. + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter(supportsTextFormat: true)), + MatchRequirement.DataTypeName); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter(supportsTextFormat: true)), + MatchRequirement.DataTypeName); + mappings.AddType(dataTypeName, + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + //Special mappings, these have no corresponding array mapping. 
+ mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + } + + // Jsonb + const byte jsonbVersion = 1; + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, TextConverter.CreateStringConverter(options.TextEncoding))), isDefault: true); + mappings.AddStructType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharTextConverter(options.TextEncoding)))); + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new ArrayByteaConverter(supportsTextFormat: true))), + MatchRequirement.DataTypeName); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter(supportsTextFormat: true))), + MatchRequirement.DataTypeName); + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => new PgConcreteTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamConverter(supportsTextFormat: true)), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + //Special mappings, these have no corresponding array mapping. 
+ mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new GetCharsTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + + // Jsonpath + const byte jsonpathVersion = 1; + mappings.AddType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, TextConverter.CreateStringConverter(options.TextEncoding))), isDefault: true); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new GetCharsTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), + MatchRequirement.DataTypeName); + + // Bytea + mappings.AddType(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter(supportsTextFormat: false)), isDefault: true); + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter(supportsTextFormat: false))); + mappings.AddType(DataTypeNames.Bytea, + // TODO handling bytea textually would require 
conversions to hex strings, so currently we don't support it. + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: false), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), + mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + + // Varbit + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, + new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Varbit)), includeDataTypeName: true)); + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); + mappings.AddStructType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); + + // Bit + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, + new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Bit)), includeDataTypeName: true)); + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); + mappings.AddStructType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); + + // Timestamp + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType(DataTypeNames.Timestamp, + static (options, mapping, _) => mapping.CreateInfo(options, + new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true)), isDefault: true); + 
} + else + { + mappings.AddProviderStructType(DataTypeNames.Timestamp, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateProvider(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); + } + mappings.AddStructType(DataTypeNames.Timestamp, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // TimestampTz + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, + new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false)), matchRequirement: MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); + } + else + { + mappings.AddProviderStructType(DataTypeNames.TimestampTz, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, + DateTimeTypeInfoProvider.CreateProvider(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); + } + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // Date + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + 
mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateTimeDateConverter(options.EnableDateTimeInfinityConversions)), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + + // Interval + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanIntervalConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); + + // Time + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter())); + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // TimeTz + mappings.AddStructType(DataTypeNames.TimeTz, + static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetTimeTzConverter()), + MatchRequirement.DataTypeName); + + // Uuid + mappings.AddStructType(DataTypeNames.Uuid, + static (options, mapping, _) => mapping.CreateInfo(options, new GuidUuidConverter()), isDefault: true); + + // Hstore + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding)), isDefault: true); + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding))); + + // Unknown + mappings.AddType(DataTypeNames.Unknown, + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), preferredFormat: DataFormat.Text), + 
MatchRequirement.DataTypeName); + + // Void + mappings.AddType(DataTypeNames.Void, + static (options, mapping, _) => mapping.CreateInfo(options, new VoidConverter(), supportsWriting: false), + MatchRequirement.DataTypeName); + + // UInt internal types + foreach (var dataTypeName in new[] + { + DataTypeNames.Oid, + DataTypeNames.Xid, + DataTypeNames.Cid, + DataTypeNames.RegClass, + DataTypeNames.RegCollation, + DataTypeNames.RegConfig, + DataTypeNames.RegDictionary, + DataTypeNames.RegNamespace, + DataTypeNames.RegOper, + DataTypeNames.RegOperator, + DataTypeNames.RegProc, + DataTypeNames.RegProcedure, + DataTypeNames.RegRole, + DataTypeNames.RegType + }) + { + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt32Converter()), + MatchRequirement.DataTypeName); + } + + // Char + mappings.AddStructType(DataTypeNames.Char, + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Char, + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), + MatchRequirement.DataTypeName); + + // Xid8 + mappings.AddStructType(DataTypeNames.Xid8, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter()), + MatchRequirement.DataTypeName); + + // Oidvector + mappings.AddType( + DataTypeNames.OidVector, + static (options, mapping, _) => mapping.CreateInfo(options, + ArrayConverter.CreateArrayBased(new(options, new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), + MatchRequirement.DataTypeName); + + // Int2vector + mappings.AddType( + DataTypeNames.Int2Vector, + static (options, mapping, _) => mapping.CreateInfo(options, + ArrayConverter.CreateArrayBased(new(options, new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), + MatchRequirement.DataTypeName); + + // Tid + mappings.AddStructType(DataTypeNames.Tid, + static 
(options, mapping, _) => mapping.CreateInfo(options, new TidConverter()), + MatchRequirement.DataTypeName); + + // PgLsn + mappings.AddStructType(DataTypeNames.PgLsn, + static (options, mapping, _) => mapping.CreateInfo(options, new PgLsnConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.PgLsn, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter()), + MatchRequirement.DataTypeName); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var info = Mappings.Find(type, dataTypeName, options); + + Type? elementType = null; + if (info is null && dataTypeName is not null + && options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType { Element: var pgElementType } + && (type is null || type == typeof(object) || TypeInfoMappingCollection.IsArrayLikeType(type, out elementType))) + { + info = GetEnumArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options) ?? 
+ GetObjectArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options); + } + return info; + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // Bool + mappings.AddStructArrayType(DataTypeNames.Bool); + + // Numeric + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Float4); + mappings.AddStructArrayType(DataTypeNames.Float8); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Money); + + // Text + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType(DataTypeNames.Text); + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + mappings.AddArrayType(DataTypeNames.Text); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + // Jsonb + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType(DataTypeNames.Jsonb); + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + mappings.AddArrayType(DataTypeNames.Jsonb); + + // Jsonpath + mappings.AddArrayType(DataTypeNames.Jsonpath); + + // Bytea + mappings.AddArrayType(DataTypeNames.Bytea); + mappings.AddStructArrayType>(DataTypeNames.Bytea); + mappings.AddArrayType(DataTypeNames.Bytea); + + // Varbit + // Object mapping first. 
+ mappings.AddPolymorphicProviderArrayType(DataTypeNames.Varbit, static options => concreteTypeInfo => concreteTypeInfo.Converter switch + { + BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( + () => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), + () => ArrayConverter.CreateArrayBased(new(options, new NullableConverter((PgConverter)concreteTypeInfo.Converter), concreteTypeInfo.PgTypeId), typeof(Array)), + options), + BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), + _ => throw new NotSupportedException() + }); + mappings.AddArrayType(DataTypeNames.Varbit); + mappings.AddStructArrayType(DataTypeNames.Varbit); + mappings.AddStructArrayType(DataTypeNames.Varbit); + + // Bit + // Object mapping first. + mappings.AddPolymorphicProviderArrayType(DataTypeNames.Bit, static options => concreteTypeInfo => concreteTypeInfo.Converter switch + { + BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( + () => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), + () => ArrayConverter.CreateArrayBased(new(options, new NullableConverter((PgConverter)concreteTypeInfo.Converter), concreteTypeInfo.PgTypeId), typeof(Array)), + options), + BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), + _ => throw new NotSupportedException() + }); + mappings.AddArrayType(DataTypeNames.Bit); + mappings.AddStructArrayType(DataTypeNames.Bit); + mappings.AddStructArrayType(DataTypeNames.Bit); + + // Timestamp + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType(DataTypeNames.Timestamp); + else + mappings.AddProviderStructArrayType(DataTypeNames.Timestamp); + mappings.AddStructArrayType(DataTypeNames.Timestamp); + + // TimestampTz + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + else + mappings.AddProviderStructArrayType(DataTypeNames.TimestampTz); + 
mappings.AddStructArrayType(DataTypeNames.TimestampTz); + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + + // Date + mappings.AddStructArrayType(DataTypeNames.Date); + mappings.AddStructArrayType(DataTypeNames.Date); + mappings.AddStructArrayType(DataTypeNames.Date); + + // Interval + mappings.AddStructArrayType(DataTypeNames.Interval); + mappings.AddStructArrayType(DataTypeNames.Interval); + + // Time + mappings.AddStructArrayType(DataTypeNames.Time); + mappings.AddStructArrayType(DataTypeNames.Time); + mappings.AddStructArrayType(DataTypeNames.Time); + + // TimeTz + mappings.AddStructArrayType(DataTypeNames.TimeTz); + // Uuid + mappings.AddStructArrayType(DataTypeNames.Uuid); + + // Hstore + mappings.AddArrayType>("hstore"); + mappings.AddArrayType>("hstore"); + + // UInt internal types + foreach (var dataTypeName in new[] + { + DataTypeNames.Oid, + DataTypeNames.Xid, + DataTypeNames.Cid, + DataTypeNames.RegClass, + DataTypeNames.RegCollation, + DataTypeNames.RegConfig, + DataTypeNames.RegDictionary, + DataTypeNames.RegNamespace, + DataTypeNames.RegOper, + DataTypeNames.RegOperator, + DataTypeNames.RegProc, + DataTypeNames.RegProcedure, + DataTypeNames.RegRole, + DataTypeNames.RegType + }) + { + mappings.AddStructArrayType(dataTypeName); + } + + // Char + mappings.AddStructArrayType(DataTypeNames.Char); + mappings.AddStructArrayType(DataTypeNames.Char); + + // Xid8 + mappings.AddStructArrayType(DataTypeNames.Xid8); + + // Oidvector + mappings.AddArrayType(DataTypeNames.OidVector); + + // Int2vector + mappings.AddArrayType(DataTypeNames.Int2Vector); + + return mappings; + } + + static PgTypeInfo? GetObjectArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? type, DataTypeName dataTypeName, + PgSerializerOptions options) + { + if (elementType != typeof(object)) + return null; + + // Probe if there is any mapping at all for this element type. 
+ var elementId = options.ToCanonicalTypeId(pgElementType); + if (options.GetTypeInfoInternal(null, elementId) is null) + return null; + + var mappings = new TypeInfoMappingCollection(); + mappings.AddProviderType(pgElementType.DataTypeName, + (options, mapping, includeDataTypeName) => mapping.CreateInfo(options, new LateBoundTypeInfoProvider(options, elementId), includeDataTypeName), MatchRequirement.DataTypeName); + mappings.AddProviderArrayType(pgElementType.DataTypeName); + return mappings.Find(type, dataTypeName, options); + } + + static PgTypeInfo? GetEnumArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if ((type is not null && type != typeof(object) && elementType != typeof(string)) + || pgElementType is not PostgresEnumType enumType) + return null; + + var mappings = new TypeInfoMappingCollection(); + mappings.AddType(enumType.DataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding)), MatchRequirement.DataTypeName); + mappings.AddArrayType(enumType.DataTypeName); + return mappings.Find(type, dataTypeName, options); + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..90b872f458 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs @@ -0,0 +1,56 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class CubeTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + const string CubeTypeName = "cube"; + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void 
ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is { UnqualifiedNameSpan: "cube" or "_cube" } || type == typeof(NpgsqlCube)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.CubeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableCube), + typeof(TBuilder).Name)); + } + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(CubeTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CubeConverter()), isDefault: true); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructArrayType(CubeTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..187748ccd3 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs @@ -0,0 +1,234 @@ +using System; +using System.Collections.Immutable; +using System.Numerics; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class ExtraConversionResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) + { + // Int2 + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + + // Int4 + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + + // Int8 + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => 
mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // Float4 + mappings.AddStructType(DataTypeNames.Float4, + static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter())); + + // Float8 + mappings.AddStructType(DataTypeNames.Float8, + static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter())); + + // Numeric + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new 
BigIntegerNumericConverter())); + + // Bytea + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ArraySegmentByteaConverter(supportsTextFormat: false))); + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new MemoryByteaConverter(supportsTextFormat: false))); + + // Varbit + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); + + // Bit + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); + + // Text + // Update PgSerializerOptions.IsWellKnownTextType(Type) after any changes to this list. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateReadOnlyMemoryConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateReadOnlyMemoryConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + 
mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + } + + // Jsonb + const byte jsonbVersion = 1; + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharArrayTextConverter(options.TextEncoding)))); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, TextConverter.CreateReadOnlyMemoryConverter(options.TextEncoding)))); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new CharArraySegmentTextConverter(options.TextEncoding)))); + + // Hstore + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding, result => result.ToImmutableDictionary()))); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) + { + // Int2 + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + + // Int4 + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + + // Int8 + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + + // Float4 + mappings.AddStructArrayType(DataTypeNames.Float4); + + // Float8 + mappings.AddStructArrayType(DataTypeNames.Float8); + + // Numeric + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + + // Bytea + mappings.AddStructArrayType>(DataTypeNames.Bytea); + mappings.AddStructArrayType>(DataTypeNames.Bytea); + + // Varbit + mappings.AddArrayType(DataTypeNames.Varbit); + + // Bit + mappings.AddArrayType(DataTypeNames.Bit); + + // Text + 
mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + } + + // Jsonb + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + + // Hstore + mappings.AddArrayType>("hstore"); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..272824ad2d --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs @@ -0,0 +1,93 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class FullTextSearchTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void ThrowIfUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "tsquery" or "_tsquery" or "tsvector" or "_tsvector" }) + throw new NotSupportedException( + string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); + + if (type is null) + return; + + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) + type = elementType; + + if (Nullable.GetUnderlyingType(type) is { } underlyingType) + type = underlyingType; + + if (type == typeof(NpgsqlTsVector) || typeof(NpgsqlTsQuery).IsAssignableFrom(type)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); + } + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tsvector + mappings.AddType(DataTypeNames.TsVector, + static (options, mapping, _) => mapping.CreateInfo(options, new TsVectorConverter(options.TextEncoding)), isDefault: true); + + // tsquery + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding)), isDefault: true); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tsvector + mappings.AddArrayType(DataTypeNames.TsVector); + + // tsquery + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..a365434f54 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs @@ -0,0 +1,63 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class GeometricTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(DataTypeNames.Point, + static (options, mapping, _) => mapping.CreateInfo(options, new PointConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Box, + static (options, mapping, _) => mapping.CreateInfo(options, new BoxConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Polygon, + static (options, mapping, _) => mapping.CreateInfo(options, new PolygonConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Line, + static (options, mapping, _) => mapping.CreateInfo(options, new LineConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.LSeg, + static (options, mapping, _) => mapping.CreateInfo(options, new LineSegmentConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Path, + static (options, mapping, _) => mapping.CreateInfo(options, new PathConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Circle, + static (options, mapping, _) => mapping.CreateInfo(options, new CircleConverter()), isDefault: true); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructArrayType(DataTypeNames.Point); + mappings.AddStructArrayType(DataTypeNames.Box); + mappings.AddStructArrayType(DataTypeNames.Polygon); + mappings.AddStructArrayType(DataTypeNames.Line); + mappings.AddStructArrayType(DataTypeNames.LSeg); + mappings.AddStructArrayType(DataTypeNames.Path); + mappings.AddStructArrayType(DataTypeNames.Circle); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..02a456492f --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -0,0 +1,160 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization.Metadata; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; + +namespace Npgsql.Internal.ResolverFactories; + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class JsonDynamicTypeInfoResolverFactory( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null, + JsonSerializerOptions? 
serializerOptions = null) + : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(jsonbClrTypes, jsonClrTypes, serializerOptions); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(jsonbClrTypes, jsonClrTypes, serializerOptions); + + // Split into a nested class to avoid erroneous trimming/AOT warnings because the JsonDynamicTypeInfoResolverFactory is marked as incompatible. + internal static class Support + { + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName) + { + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "json" or "_json" or "jsonb" or "_jsonb" }) + throw new NotSupportedException( + string.Format( + NpgsqlStrings.DynamicJsonNotEnabled, + type is null || type == typeof(object) ? "" : type.Name, + nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), + typeof(TBuilder).Name)); + } + } + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + class Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + : DynamicTypeInfoResolver, IPgTypeInfoResolver + { + JsonSerializerOptions? _serializerOptions = serializerOptions; + JsonSerializerOptions SerializerOptions => _serializerOptions ??= JsonSerializerOptions.Default; + + readonly Type[] _jsonbClrTypes = jsonbClrTypes ?? []; + readonly Type[] _jsonClrTypes = jsonClrTypes ?? []; + TypeInfoMappingCollection? _mappings; + + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) + { + // We do GetTypeInfo calls directly so we need a resolver. + serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); + + AddUserMappings(jsonb: true, jsonbClrTypes); + AddUserMappings(jsonb: false, jsonClrTypes); + + void AddUserMappings(bool jsonb, Type[] clrTypes) + { + var dynamicMappings = CreateCollection(); + var dataTypeName = (string)(jsonb ? DataTypeNames.Jsonb : DataTypeNames.Json); + foreach (var jsonType in clrTypes) + { + var jsonTypeInfo = serializerOptions.GetTypeInfo(jsonType); + dynamicMappings.AddMapping(jsonTypeInfo.Type, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); + + if (!jsonType.IsValueType && jsonTypeInfo.PolymorphismOptions is not null) + { + foreach (var derived in jsonTypeInfo.PolymorphismOptions.DerivedTypes) + { + // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. + // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `derived.DerivedType` as the base type for the converter, + // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. + var baseType = jsonb && !serializerOptions.AllowOutOfOrderMetadataProperties ? 
derived.DerivedType : jsonType; + dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, baseType))); + } + } + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + + return mappings; + } + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. + if (type is null || type == typeof(object) || PgSerializerOptions.IsWellKnownTextType(type) + || dataTypeName != DataTypeNames.Jsonb && dataTypeName != DataTypeNames.Json) + return null; + + var matchedType = Nullable.GetUnderlyingType(type) ?? type; + + return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + + // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. + // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `mapping.Type` as the base type for the converter, + // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. + var baseType = jsonb && !SerializerOptions.AllowOutOfOrderMetadataProperties ? 
mapping.Type : typeof(object); + + return mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); + }); + } + + static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions, Type baseType) + => (PgConverter)Activator.CreateInstance( + typeof(JsonConverter<,>).MakeGenericType(valueType, baseType), + jsonb, + textEncoding, + serializerOptions)!; + } + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + sealed class ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + : Resolver(jsonbClrTypes, jsonClrTypes, serializerOptions), IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), base.Mappings); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options) + ?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? 
elementType, elementDataTypeName) + : null; + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + { + if (baseMappings.Items.Count == 0) + return mappings; + + var dynamicMappings = CreateCollection(baseMappings); + foreach (var mapping in baseMappings.Items) + { + // Always handle Nullable mappings as part of the underlying type. + if (Nullable.GetUnderlyingType(mapping.Type) is not null) + continue; + dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..6e926e49ef --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs @@ -0,0 +1,112 @@ +using System; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Text.Json.Serialization.Metadata; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class JsonTypeInfoResolverFactory(JsonSerializerOptions? serializerOptions = null) : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(serializerOptions); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(serializerOptions); + + class Resolver : IPgTypeInfoResolver + { + static JsonSerializerOptions? DefaultSerializerOptions; + + readonly JsonSerializerOptions _serializerOptions; + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _serializerOptions); + + public Resolver(JsonSerializerOptions? 
serializerOptions = null) + { + if (serializerOptions is null) + { + serializerOptions = DefaultSerializerOptions; + if (serializerOptions is null) + { + serializerOptions = new JsonSerializerOptions(); + serializerOptions.TypeInfoResolver = new BasicJsonTypeInfoResolver(); + DefaultSerializerOptions = serializerOptions; + } + } + + _serializerOptions = serializerOptions; + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, JsonSerializerOptions serializerOptions) + { + // Jsonb is the first default for JsonDocument + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, + new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddStructType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, + new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + } + + return mappings; + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver + { + public JsonTypeInfo? 
GetTypeInfo(Type type, JsonSerializerOptions options) + { + if (type == typeof(JsonDocument)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonDocumentConverter); + if (type == typeof(JsonElement)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonElementConverter); + if (type == typeof(JsonObject)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonObjectConverter); + if (type == typeof(JsonArray)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonArrayConverter); + if (type == typeof(JsonValue)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonValueConverter); + if (type == typeof(JsonNode)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonNodeConverter); + return null; + } + } + } + + sealed class ArrayResolver(JsonSerializerOptions? serializerOptions = null) : Resolver(serializerOptions), IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..13d1f51cc4 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs @@ -0,0 +1,66 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class LTreeTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is { UnqualifiedNameSpan: "ltree" or "_ltree" or "lquery" or "_lquery" or "ltxtquery" or "_ltxtquery" }) + throw new NotSupportedException( + string.Format(NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), + typeof(TBuilder).Name)); + } + + class Resolver : IPgTypeInfoResolver + { + const byte LTreeVersion = 1; + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddType("ltree", + static (options, mapping, _) => mapping.CreateInfo(options, + new VersionPrefixedTextConverter(LTreeVersion, TextConverter.CreateStringConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + mappings.AddType("lquery", + static (options, mapping, _) => mapping.CreateInfo(options, + new VersionPrefixedTextConverter(LTreeVersion, TextConverter.CreateStringConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + mappings.AddType("ltxtquery", + static (options, mapping, _) => mapping.CreateInfo(options, + new VersionPrefixedTextConverter(LTreeVersion, TextConverter.CreateStringConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType("ltree"); + mappings.AddArrayType("lquery"); + mappings.AddArrayType("ltxtquery"); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..3cd2d14354 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -0,0 +1,85 @@ +using System; +using System.Net; +using System.Net.NetworkInformation; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class NetworkTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // macaddr + mappings.AddType(DataTypeNames.MacAddr, + static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: false)), isDefault: true); + mappings.AddType(DataTypeNames.MacAddr8, + static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: true)), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); + + // inet + // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). + mappings.AddType(DataTypeNames.Inet, + static (options, mapping, _) => new PgConcreteTypeInfo(options, new IPAddressConverter(), new DataTypeName(mapping.DataTypeName), + requestedType: mapping.Type), + mapping => mapping with + { + MatchRequirement = MatchRequirement.Single, + TypeMatchPredicate = type => type is null || typeof(IPAddress).IsAssignableFrom(type) + }); + mappings.AddStructType(DataTypeNames.Inet, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlInetConverter())); + + // cidr + mappings.AddStructType(DataTypeNames.Cidr, + static (options, mapping, _) => mapping.CreateInfo(options, new IPNetworkConverter()), isDefault: true); + +#pragma warning disable CS0618 // NpgsqlCidr is obsolete + mappings.AddStructType(DataTypeNames.Cidr, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter())); +#pragma warning restore CS0618 + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // macaddr + mappings.AddArrayType(DataTypeNames.MacAddr); + mappings.AddArrayType(DataTypeNames.MacAddr8); + + // inet + mappings.AddArrayType(DataTypeNames.Inet); + mappings.AddStructArrayType(DataTypeNames.Inet); + + // cidr + mappings.AddStructArrayType(DataTypeNames.Cidr); +#pragma warning disable CS0618 // NpgsqlCidr is obsolete + mappings.AddStructArrayType(DataTypeNames.Cidr); +#pragma warning restore CS0618 + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..eb7de18a1f --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs @@ -0,0 +1,59 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class RecordTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "record" or "_record" }) + { + throw new NotSupportedException( + string.Format( + NpgsqlStrings.RecordsNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples), + typeof(TBuilder).Name, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords))); + } + } + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? 
GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => + mapping.CreateInfo(options, new RecordConverter(options), supportsWriting: false), + MatchRequirement.DataTypeName); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType(DataTypeNames.Record); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..551c2836b9 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs @@ -0,0 +1,75 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +[RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] +[RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] +sealed class TupledRecordTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + [RequiresUnreferencedCode("Tupled 
record resolver may perform reflection on trimmed tuple types.")] + [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] + class Resolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (dataTypeName != DataTypeNames.Record || type is null || !IsTypeOrNullableOfType(type, + static type => type is { IsConstructedGenericType: true, FullName: not null } && + (type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) || + type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal)), out var matchedType)) + return null; + + return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => + { + var constructors = mapping.Type.GetConstructors(); + ConstructorInfo? constructor = null; + if (constructors.Length is 1) + constructor = constructors[0]; + else + { + var args = mapping.Type.GenericTypeArguments.Length; + foreach (var ctor in constructors) + if (ctor.GetParameters().Length == args) + { + constructor = ctor; + break; + } + } + + if (constructor is null) + throw new InvalidOperationException($"Couldn't find a suitable constructor for record type: {mapping.Type.FullName}"); + + var factory = typeof(Resolver).GetMethod(nameof(CreateFactory), BindingFlags.Static | BindingFlags.NonPublic)! 
+ .MakeGenericMethod(mapping.Type) + .Invoke(null, [constructor, constructor.GetParameters().Length]); + + var converterType = typeof(RecordConverter<>).MakeGenericType(mapping.Type); + var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; + return mapping.CreateInfo(options, converter, supportsWriting: false); + }); + } + + static Func CreateFactory(ConstructorInfo constructor, int constructorParameters) => array => + { + if (array.Length != constructorParameters) + throw new InvalidCastException($"Cannot read record type with {array.Length} fields as {typeof(T)}"); + return (T)constructor.Invoke(array); + }; + } + + [RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] + [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] + sealed class ArrayResolver : Resolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? 
elementType, elementDataTypeName) + : null; + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..98e285a887 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -0,0 +1,187 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +[RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] +[RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] +sealed class UnmappedTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new EnumResolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new EnumArrayResolver(); + + public override IPgTypeInfoResolver CreateRangeResolver() => new RangeResolver(); + public override IPgTypeInfoResolver CreateRangeArrayResolver() => new RangeArrayResolver(); + + public override IPgTypeInfoResolver? CreateMultirangeResolver() => new MultirangeResolver(); + public override IPgTypeInfoResolver? CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + class EnumResolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is null || !IsTypeOrNullableOfType(type, static type => type.IsEnum, out var matchedType) || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) + return null; + + return CreateCollection().AddMapping(matchedType, dataTypeName, static (options, mapping, _) => + { + var enumToLabel = new Dictionary(); + var labelToEnum = new Dictionary(); + foreach (var field in mapping.Type.GetFields(BindingFlags.Static | BindingFlags.Public)) + { + var attribute = (PgNameAttribute?)field.GetCustomAttribute(typeof(PgNameAttribute), false); + var enumName = attribute?.PgName ?? options.DefaultNameTranslator.TranslateMemberName(field.Name); + var enumValue = (Enum)field.GetValue(null)!; + + enumToLabel[enumValue] = enumName; + labelToEnum[enumName] = enumValue; + } + + return mapping.CreateInfo(options, (PgConverter)Activator.CreateInstance(typeof(EnumConverter<>).MakeGenericType(mapping.Type), + enumToLabel, labelToEnum, + options.TextEncoding)!); + }); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + sealed class EnumArrayResolver : EnumResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? 
elementType, elementDataTypeName) + : null; + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + class RangeResolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + var matchedType = type; + if ((type is not null && type != typeof(object) && !IsTypeOrNullableOfType(type, + static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), + out matchedType)) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresRangeType rangeType) + return null; + + // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) + var subInfo = options.GetTypeInfoInternal( + matchedType is null ? null : matchedType == typeof(object) ? matchedType : matchedType.GetGenericArguments()[0], + options.ToCanonicalTypeId(rangeType.Subtype.GetRepresentationalType())); + + // We have no generic range-specific PgConcreteTypeInfoProvider so we would not know how to compose a range mapping for provider-backed sub-infos. + // See https://github.com/npgsql/npgsql/issues/5268 + if (subInfo is not PgConcreteTypeInfo) + return null; + + subInfo = subInfo.ToExactTypeInfo(); + var subConcrete = (PgConcreteTypeInfo)subInfo; + + var converterType = typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); + + return CreateCollection().AddMapping(matchedType ?? 
converterType, dataTypeName, + (options, mapping, _) => + new PgConcreteTypeInfo( + options, + (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), + ((PgConcreteTypeInfo)subInfo).Converter)!, + new DataTypeName(mapping.DataTypeName), + requestedType: matchedType + ) { PreferredFormat = subConcrete.PreferredFormat, SupportsWriting = subConcrete.SupportsWriting }, + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + sealed class RangeArrayResolver : RangeResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if ((type is not null && type != typeof(object) && !IsArrayLikeType(type, out elementType)) + || !IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName)) + return null; + + var mappings = base.GetMappings(elementType, elementDataTypeName, options); + + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + + if (elementType is not null && Nullable.GetUnderlyingType(elementType) is { } underlyingType) + elementType = underlyingType; + + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + class MultirangeResolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if ((type is not null && type != typeof(object) && !IsArrayLikeType(type, out elementType)) + || elementType is not null && !IsTypeOrNullableOfType(elementType, + static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out _) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresMultirangeType multirangeType) + return null; + + var subInfo = options.GetTypeInfoInternal(type is null ? null : elementType ?? typeof(object), options.ToCanonicalTypeId(multirangeType.Subrange)); + + // We have no generic multirange-specific PgConcreteTypeInfoProvider so we would not know how to compose a multirange mapping for provider-backed sub-infos. + // See https://github.com/npgsql/npgsql/issues/5268 + if (subInfo is not PgConcreteTypeInfo) + return null; + + subInfo = subInfo.ToExactTypeInfo(); + var subConcrete = (PgConcreteTypeInfo)subInfo; + + var converterType = subInfo.Type.MakeArrayType(); + + return CreateCollection().AddMapping(type ?? 
converterType, dataTypeName, + (options, mapping, _) => + new PgConcreteTypeInfo( + options, + (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(converterType, subInfo.Type), + ((PgConcreteTypeInfo)subInfo).Converter)!, + new DataTypeName(mapping.DataTypeName), + requestedType: type + ) { PreferredFormat = subConcrete.PreferredFormat, SupportsWriting = subConcrete.SupportsWriting }, + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + sealed class MultirangeArrayResolver : MultirangeResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + var elementType = type == typeof(object) ? type : null; + if ((type is not null && type != typeof(object) && !IsArrayLikeType(type, out elementType)) + || !IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName)) + return null; + + var mappings = base.GetMappings(elementType, elementDataTypeName, options); + + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + + if (elementType is not null && Nullable.GetUnderlyingType(elementType) is { } underlyingType) + elementType = underlyingType; + + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs new file mode 100644 index 0000000000..efcc4633ba --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -0,0 +1,71 @@ +using System; +using System.Collections; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using Npgsql.Properties; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver +{ + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (options.IntrospectionMode) + return null; + + RecordTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + FullTextSearchTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + LTreeTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + CubeTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + + JsonDynamicTypeInfoResolverFactory.Support.ThrowIfUnsupported(type, dataTypeName); + + switch (dataTypeName is null ? null : options.DatabaseInfo.GetPostgresType(dataTypeName.GetValueOrDefault())) + { + case PostgresEnumType: + // Unmapped enum types never work on object or default. 
+ if (type is not null && type != typeof(object)) + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedEnumsNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + typeof(TBuilder).Name)); + break; + + case PostgresRangeType when !options.RangesEnabled: + throw new NotSupportedException( + string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); + case PostgresRangeType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + typeof(TBuilder).Name)); + + case PostgresMultirangeType when !options.MultirangesEnabled: + throw new NotSupportedException( + string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); + case PostgresMultirangeType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + typeof(TBuilder).Name)); + + case PostgresArrayType when !options.ArraysEnabled: + throw new NotSupportedException( + string.Format(NpgsqlStrings.ArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); + } + + if (type is not null) + { + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) && TypeInfoMappingCollection.IsArrayLikeType(elementType, out _)) + throw new NotSupportedException("Writing is not supported for jagged collections, use a multidimensional array instead."); + + if (typeof(IEnumerable).IsAssignableFrom(type) && !typeof(IList).IsAssignableFrom(type) && type != typeof(string) && (dataTypeName is null || dataTypeName.Value.IsArray)) + throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or some implementation of IList instead."); + } + + return null; + } +} diff --git 
a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs new file mode 100644 index 0000000000..299f2bb229 --- /dev/null +++ b/src/Npgsql/Internal/Size.cs @@ -0,0 +1,99 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public enum SizeKind +{ + Unknown = 0, + Exact, + UpperBound +} + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +[DebuggerDisplay("{DebuggerDisplay,nq}")] +public readonly struct Size : IEquatable +{ + readonly int _value; + readonly SizeKind _kind; + + Size(SizeKind kind, int value) + { + _value = value; + _kind = kind; + } + + public int Value + { + get + { + if (_kind is SizeKind.Unknown) + ThrowHelper.ThrowInvalidOperationException("Cannot get value from default or Unknown kind"); + return _value; + } + } + + internal int GetValueOrDefault() => _value; + + public SizeKind Kind => _kind; + + public static Size Create(int byteCount) => new(SizeKind.Exact, byteCount); + public static Size CreateUpperBound(int byteCount) => new(SizeKind.UpperBound, byteCount); + public static Size Unknown { get; } = new(SizeKind.Unknown, 0); + public static Size Zero { get; } = new(SizeKind.Exact, 0); + + public bool TryCombine(Size other, out Size result) + { + if (_kind is SizeKind.Unknown || other._kind is SizeKind.Unknown) + { + result = Unknown; + return true; + } + + var sum = unchecked(_value + other._value); + if ((_value >= 0 && sum < other._value) || (_value < 0 && sum > other._value)) + { + result = default; + return false; + } + + if (_kind is SizeKind.UpperBound || other._kind is SizeKind.UpperBound) + { + result = CreateUpperBound(sum); + return true; + } + + result = Create(sum); + return true; + } + + public Size Combine(Size other) + { + if (_kind is SizeKind.Unknown || other._kind is SizeKind.Unknown) + return Unknown; + + if (_kind is SizeKind.UpperBound || other._kind is SizeKind.UpperBound) + return 
CreateUpperBound(checked(_value + other._value)); + + return Create(checked(_value + other._value)); + } + + public static implicit operator Size(int value) => Create(value); + + string DebuggerDisplay => ToString(); + + public bool Equals(Size other) => _value == other._value && _kind == other.Kind; + public override bool Equals(object? obj) => obj is Size other && Equals(other); + public override int GetHashCode() => HashCode.Combine(_value, (int)_kind); + public static bool operator ==(Size left, Size right) => left.Equals(right); + public static bool operator !=(Size left, Size right) => !left.Equals(right); + + public override string ToString() => _kind switch + { + SizeKind.Exact or SizeKind.UpperBound => $"{_value} ({_kind.ToString()})", + SizeKind.Unknown => nameof(SizeKind.Unknown), + _ => throw new ArgumentOutOfRangeException() + }; +} diff --git a/src/Npgsql/Internal/TransportSecurityHandler.cs b/src/Npgsql/Internal/TransportSecurityHandler.cs new file mode 100644 index 0000000000..5776bcf993 --- /dev/null +++ b/src/Npgsql/Internal/TransportSecurityHandler.cs @@ -0,0 +1,40 @@ +using System; +using System.Security.Cryptography.X509Certificates; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Properties; +using Npgsql.Util; + +namespace Npgsql.Internal; + +class TransportSecurityHandler +{ + public virtual bool SupportEncryption => false; + + public virtual Func? 
RootCertificatesCallback + { + get => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); + set => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); + } + + public virtual Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, CancellationToken cancellationToken) + => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); + + public virtual void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, + ref bool successfulBind) + => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); +} + +sealed class RealTransportSecurityHandler : TransportSecurityHandler +{ + public override bool SupportEncryption => true; + + public override Func? 
RootCertificatesCallback { get; set; } + + public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, CancellationToken cancellationToken) + => connector.NegotiateEncryption(sslMode, timeout, async, cancellationToken); + + public override void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, + ref bool successfulBind) + => connector.AuthenticateSASLSha256Plus(ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); +} diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs deleted file mode 100644 index 742f503726..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ /dev/null @@ -1,516 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Linq.Expressions; -using System.Reflection; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Non-generic base class for all type handlers which handle PostgreSQL arrays. -/// Extend from instead. -/// -/// -/// https://www.postgresql.org/docs/current/static/arrays.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public abstract class ArrayHandler : NpgsqlTypeHandler -{ - private protected int LowerBound { get; } // The lower bound value sent to the backend when writing arrays. Normally 1 (the PG default) but is 0 for OIDVector. 
- private protected NpgsqlTypeHandler ElementHandler { get; } - private protected ArrayNullabilityMode ArrayNullabilityMode { get; } - - static readonly MethodInfo ReadArrayMethod = typeof(ArrayHandler).GetMethod(nameof(ReadArray), BindingFlags.NonPublic | BindingFlags.Instance)!; - static readonly MethodInfo ReadListMethod = typeof(ArrayHandler).GetMethod(nameof(ReadList), BindingFlags.NonPublic | BindingFlags.Instance)!; - - /// - protected ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) - : base(arrayPostgresType) - { - LowerBound = lowerBound; - ElementHandler = elementHandler; - ArrayNullabilityMode = arrayNullabilityMode; - } - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Array); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(Array); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); - - #region Read - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ArrayTypeInfo.ReadArrayFunc(this, buf, async); - - if (ArrayTypeInfo.IsList) - return await ArrayTypeInfo.ReadListFunc(this, buf, async); - - throw new InvalidCastException(fieldDescription == null - ? 
$"Can't cast database type to {typeof(TRequestedArray).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TRequestedArray).Name}" - ); - } - - /// - /// Reads an array of element type from the given buffer . - /// - protected async ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0, bool readAsObject = false) - { - await buf.Ensure(12, async); - var dimensions = buf.ReadInt32(); - var containsNulls = buf.ReadInt32() == 1; - buf.ReadUInt32(); // Element OID. Ignored. - - var returnType = readAsObject - ? ArrayNullabilityMode switch - { - ArrayNullabilityMode.Never => ElementTypeInfo.IsNonNullable && containsNulls - ? throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage) - : typeof(TRequestedElement), - ArrayNullabilityMode.Always => ElementTypeInfo.NullableElementType, - ArrayNullabilityMode.PerInstance => containsNulls - ? ElementTypeInfo.NullableElementType - : typeof(TRequestedElement), - _ => throw new ArgumentOutOfRangeException() - } - : ElementTypeInfo.IsNonNullable && containsNulls - ? throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage) - : typeof(TRequestedElement); - - if (dimensions == 0) - return expectedDimensions > 1 - ? Array.CreateInstance(returnType, new int[expectedDimensions]) - : returnType == typeof(TRequestedElement) - ? 
Array.Empty() - : Array.CreateInstance(returnType, 0); - - if (expectedDimensions > 0 && dimensions != expectedDimensions) - throw new InvalidOperationException($"Cannot read an array with {expectedDimensions} dimension(s) from an array with {dimensions} dimension(s)"); - - if (dimensions == 1 && returnType == typeof(TRequestedElement)) - { - await buf.Ensure(8, async); - var arrayLength = buf.ReadInt32(); - - buf.ReadInt32(); // Lower bound - - var oneDimensional = new TRequestedElement[arrayLength]; - for (var i = 0; i < oneDimensional.Length; i++) - oneDimensional[i] = await ElementHandler.ReadWithLength(buf, async); - - return oneDimensional; - } - - var dimLengths = new int[dimensions]; - await buf.Ensure(dimensions * 8, async); - - for (var i = 0; i < dimLengths.Length; i++) - { - dimLengths[i] = buf.ReadInt32(); - buf.ReadInt32(); // Lower bound - } - - var result = Array.CreateInstance(returnType, dimLengths); - - // Either multidimensional arrays or arrays of nullable value types requested as object - // We can't avoid boxing here - var indices = new int[dimensions]; - while (true) - { - await buf.Ensure(4, async); - var len = buf.ReadInt32(); - var element = len == -1 - ? (object?)null - : NullableHandler.Exists - ? await NullableHandler.ReadAsync!(ElementHandler, buf, len, async) - : await ElementHandler.Read(buf, len, async); - - result.SetValue(element, indices); - - // TODO: Overly complicated/inefficient... - indices[dimensions - 1]++; - for (var dim = dimensions - 1; dim >= 0; dim--) - { - if (indices[dim] <= result.GetUpperBound(dim)) - continue; - - if (dim == 0) - return result; - - for (var j = dim; j < dimensions; j++) - indices[j] = result.GetLowerBound(j); - indices[dim - 1]++; - } - } - } - - /// - /// Reads a generic list containing elements of type from the given buffer . 
- /// - protected async ValueTask> ReadList(NpgsqlReadBuffer buf, bool async) - { - await buf.Ensure(12, async); - var dimensions = buf.ReadInt32(); - var containsNulls = buf.ReadInt32() == 1; - buf.ReadUInt32(); // Element OID. Ignored. - - if (dimensions == 0) - return new List(); - if (dimensions > 1) - throw new NotSupportedException($"Can't read multidimensional array as List<{typeof(TRequestedElement).Name}>"); - if (ElementTypeInfo.IsNonNullable && containsNulls) - throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage); - - await buf.Ensure(8, async); - var length = buf.ReadInt32(); - buf.ReadInt32(); // We don't care about the lower bounds - - var list = new List(length); - for (var i = 0; i < length; i++) - list.Add(await ElementHandler.ReadWithLength(buf, async)); - return list; - } - - internal const string ReadNonNullableCollectionWithNullsExceptionMessage = - "Cannot read a non-nullable collection of elements because the returned array contains nulls. " + - "Call GetFieldValue with a nullable array instead."; - - #endregion Read - - #region Static generic caching helpers - - internal static class ElementTypeInfo - { - public static readonly bool IsNonNullable = - typeof(TElement).IsValueType && Nullable.GetUnderlyingType(typeof(TElement)) is null; - - public static readonly Type NullableElementType = IsNonNullable - ? typeof(Nullable<>).MakeGenericType(typeof(TElement)) - : typeof(TElement); - } - - internal static class ArrayTypeInfo - { - // ReSharper disable StaticMemberInGenericType - public static readonly bool IsArray; - public static readonly bool IsList; - public static readonly Type? 
ElementType; - - public static readonly Func> ReadArrayFunc = default!; - public static readonly Func> ReadListFunc = default!; - // ReSharper restore StaticMemberInGenericType - - static ArrayTypeInfo() - { - var type = typeof(TArrayOrList); - IsArray = type.IsArray; - IsList = type.IsGenericType && type.GetGenericTypeDefinition() == typeof(List<>); - - ElementType = IsArray - ? type.GetElementType() - : IsList - ? type.GetGenericArguments()[0] - : null; - - if (ElementType == null) - return; - - // Initialize delegates - var arrayHandlerParam = Expression.Parameter(typeof(ArrayHandler), "arrayHandler"); - var bufferParam = Expression.Parameter(typeof(NpgsqlReadBuffer), "buf"); - var asyncParam = Expression.Parameter(typeof(bool), "async"); - - if (IsArray) - { - ReadArrayFunc = Expression - .Lambda>>( - Expression.Call( - arrayHandlerParam, - ReadArrayMethod.MakeGenericMethod(ElementType), - bufferParam, asyncParam, Expression.Constant(type.GetArrayRank()), Expression.Constant(false, typeof(bool))), - arrayHandlerParam, bufferParam, asyncParam) - .Compile(); - } - - if (IsList) - { - ReadListFunc = Expression - .Lambda>>( - Expression.Call( - arrayHandlerParam, - ReadListMethod.MakeGenericMethod(ElementType), - bufferParam, asyncParam), - arrayHandlerParam, bufferParam, asyncParam) - .Compile(); - } - } - } - - #endregion Static generic caching helpers -} - -/// -/// Base class for all type handlers which handle PostgreSQL arrays. -/// -/// -/// https://www.postgresql.org/docs/current/static/arrays.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public class ArrayHandler : ArrayHandler -{ - /// - public ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) - : base(arrayPostgresType, elementHandler, arrayNullabilityMode, lowerBound) {} - - #region Read - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await ReadArray(buf, async, readAsObject: true); - - #endregion - - #region Write - - static Exception MixedTypesOrJaggedArrayException(Exception innerException) - => new("While trying to write an array, one of its elements failed validation. " + - "You may be trying to mix types in a non-generic IList, or to write a jagged array.", innerException); - - static Exception CantWriteTypeException(Type type) - => new InvalidCastException($"Can't write type {type} as an array of {typeof(TElement)}"); - - // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), - // we must use the bang operator here to tell the compiler that a null value will never be returned. - - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache); - - /// - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value is null || value == DBNull.Value - ? 0 - : ValidateAndGetLength(value!, ref lengthCache); - - int ValidateAndGetLength(object value, ref NpgsqlLengthCache? 
lengthCache) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - if (value is ICollection generic) - return ValidateAndGetLengthGeneric(generic, ref lengthCache); - if (value is ICollection nonGeneric) - return ValidateAndGetLengthNonGeneric(nonGeneric, ref lengthCache); - throw CantWriteTypeException(value.GetType()); - } - - // Handle single-dimensional arrays and generic IList - int ValidateAndGetLengthGeneric(ICollection value, ref NpgsqlLengthCache lengthCache) - { - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - 1 * 8 + // number of dimensions (1) * (length + lower bound) - 4 * value.Count; // sum of element lengths - - lengthCache.Set(0); - var elemLengthCache = lengthCache; - - foreach (var element in value) - { - if (element is null) - continue; - - try - { - len += ElementHandler.ValidateAndGetLength(element, ref elemLengthCache, null); - } - catch (Exception e) - { - throw MixedTypesOrJaggedArrayException(e); - } - } - - lengthCache.Lengths[pos] = len; - return len; - } - - // Take care of multi-dimensional arrays and non-generic IList, we have no choice but to box/unbox - int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLengthCache lengthCache) - { - var asMultidimensional = value as Array; - var dimensions = asMultidimensional?.Rank ?? 1; - - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - dimensions * 8 + // number of dimensions * (length + lower bound) - 4 * value.Count; // sum of element lengths - - lengthCache.Set(0); - NpgsqlLengthCache? 
elemLengthCache = lengthCache; - - foreach (var element in value) - { - if (element is null) - continue; - - try - { - len += ElementHandler.ValidateObjectAndGetLength(element, ref elemLengthCache, null); - } - catch (Exception e) - { - throw MixedTypesOrJaggedArrayException(e); - } - } - - lengthCache.Lengths[pos] = len; - return len; - } - - protected override Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - { - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - - if (value is ICollection list) - return WriteGeneric(list, buf, lengthCache, async, cancellationToken); - - if (value is ICollection nonGeneric) - return WriteNonGeneric(nonGeneric, buf, lengthCache, async, cancellationToken); - - throw CantWriteTypeException(value.GetType()); - } - - // The default WriteObjectWithLength casts the type handler to INpgsqlTypeHandler, but that's not sufficient for - // us (need to handle many types of T, e.g. int[], int[,]...) - /// - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value is null || value is DBNull - ? WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken) - : WriteWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - - async Task WriteGeneric(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, bool async, CancellationToken cancellationToken = default) - { - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - 1 * 8; // number of dimensions (1) * (length + lower bound) - if (buf.WriteSpaceLeft < len) - { - await buf.Flush(async, cancellationToken); - Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); - } - - buf.WriteInt32(1); - buf.WriteInt32(1); // has_nulls = 1. Not actually used by the backend. - buf.WriteUInt32(ElementHandler.PostgresType.OID); - buf.WriteInt32(value.Count); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG - - foreach (var element in value) - await ElementHandler.WriteWithLength(element, buf, lengthCache, null, async, cancellationToken); - } - - async Task WriteNonGeneric(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) - { - var asArray = value as Array; - var dimensions = asArray?.Rank ?? 1; - - var len = - 4 + // ndim - 4 + // has_nulls - 4 + // element_oid - dimensions * 8; // dim (4) + lBound (4) - - if (buf.WriteSpaceLeft < len) - { - await buf.Flush(async, cancellationToken); - Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); - } - - buf.WriteInt32(dimensions); - buf.WriteInt32(1); // HasNulls=1. Not actually used by the backend. 
- buf.WriteUInt32(ElementHandler.PostgresType.OID); - if (asArray != null) - { - for (var i = 0; i < dimensions; i++) - { - buf.WriteInt32(asArray.GetLength(i)); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG - } - } - else - { - buf.WriteInt32(value.Count); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG - } - - foreach (var element in value) - await ElementHandler.WriteObjectWithLength(element, buf, lengthCache, null, async, cancellationToken); - } - - #endregion -} - -/// -/// https://www.postgresql.org/docs/current/static/arrays.html -/// -/// The .NET type contained as an element within this array -/// The .NET provider-specific type contained as an element within this array -sealed class ArrayHandlerWithPsv : ArrayHandler -{ - public ArrayHandlerWithPsv(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) - : base(arrayPostgresType, elementHandler, arrayNullabilityMode) { } - - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.ElementType == typeof(TElementPsv)) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ReadArray(buf, async, typeof(TRequestedArray).GetArrayRank()); - - if (ArrayTypeInfo.IsList) - return (TRequestedArray)(object)await ReadList(buf, async); - } - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => ReadPsvAsObject(buf, len, false, fieldDescription).GetAwaiter().GetResult(); - - internal override async ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => await ReadArray(buf, async, readAsObject: true); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs deleted file mode 100644 index 7ffa97b0c1..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ /dev/null @@ -1,297 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Specialized; -using System.Diagnostics; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL bit string data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-bit.html. -/// -/// Note that for BIT(1), this handler will return a bool by default, to align with SQLClient -/// (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class BitStringHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler -{ - public BitStringHandler(PostgresType pgType) : base(pgType) {} - - public override Type GetFieldType(FieldDescription? fieldDescription = null) - => fieldDescription != null && fieldDescription.TypeModifier == 1 ? typeof(bool) : typeof(BitArray); - - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) - => GetFieldType(fieldDescription); - - // BitString requires a special array handler which returns bool or BitArray - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new BitStringArrayHandler(pgArrayType, this, arrayNullabilityMode); - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numBits = buf.ReadInt32(); - var result = new BitArray(numBits); - var bytesLeft = len - 4; // Remove leading number of bits - if (bytesLeft == 0) - return result; - - var bitNo = 0; - while (true) - { - var iterationEndPos = bytesLeft > buf.ReadBytesLeft - ? bytesLeft - buf.ReadBytesLeft - : 1; - - for (; bytesLeft > iterationEndPos; bytesLeft--) - { - // ReSharper disable ShiftExpressionRealShiftCountIsZero - var chunk = buf.ReadByte(); - result[bitNo++] = (chunk & (1 << 7)) != 0; - result[bitNo++] = (chunk & (1 << 6)) != 0; - result[bitNo++] = (chunk & (1 << 5)) != 0; - result[bitNo++] = (chunk & (1 << 4)) != 0; - result[bitNo++] = (chunk & (1 << 3)) != 0; - result[bitNo++] = (chunk & (1 << 2)) != 0; - result[bitNo++] = (chunk & (1 << 1)) != 0; - result[bitNo++] = (chunk & (1 << 0)) != 0; - } - - if (bytesLeft == 1) - break; - - Debug.Assert(buf.ReadBytesLeft == 0); - await buf.Ensure(Math.Min(bytesLeft, buf.Size), async); - } - - if (bitNo < result.Length) - { - var remainder = result.Length - bitNo; - await buf.Ensure(1, async); - var lastChunk = buf.ReadByte(); - for (var i = 7; i >= 8 - remainder; i--) - result[bitNo++] = (lastChunk & (1 << i)) != 0; - } - - return result; - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - if (len > 4 + 4) - throw new InvalidCastException("Can't read PostgreSQL bitstring with more than 32 bits into BitVector32"); - - await buf.Ensure(4 + 4, async); - - var numBits = buf.ReadInt32(); - return numBits == 0 - ? new BitVector32(0) - : new BitVector32(buf.ReadInt32()); - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(5, async); - var bitLen = buf.ReadInt32(); - if (bitLen != 1) - throw new InvalidCastException("Can't convert a BIT(N) type to bool, only BIT(1)"); - var b = buf.ReadByte(); - return (b & 128) != 0; - } - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing string to PostgreSQL bitstring is supported, no reading."); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => fieldDescription?.TypeModifier == 1 - ? await Read(buf, len, async, fieldDescription) - : await Read(buf, len, async, fieldDescription); - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(BitArray value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => 4 + (value.Length + 7) / 8; - - /// - public int ValidateAndGetLength(BitVector32 value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Data == 0 ? 4 : 8; - - /// - public int ValidateAndGetLength(bool value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => 5; - - /// - public int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (value.Any(c => c != '0' && c != '1')) - throw new FormatException("Cannot interpret as ASCII BitString: " + value); - return 4 + (value.Length + 7) / 8; - } - - /// - public override async Task Write(BitArray value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - // Initial bitlength byte - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Length); - - var byteLen = (value.Length + 7) / 8; - var pos = 0; - while (true) - { - var endPos = pos + Math.Min(byteLen - pos, buf.WriteSpaceLeft); - for (; pos < endPos; pos++) - { - var bitPos = pos*8; - var b = 0; - for (var i = 0; i < Math.Min(8, value.Length - bitPos); i++) - b += (value[bitPos + i] ? 1 : 0) << (8 - i - 1); - buf.WriteByte((byte)b); - } - - if (pos == byteLen) - return; - await buf.Flush(async, cancellationToken); - } - } - - /// - public async Task Write(BitVector32 value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 8) - await buf.Flush(async, cancellationToken); - - if (value.Data == 0) - buf.WriteInt32(0); - else - { - buf.WriteInt32(32); - buf.WriteInt32(value.Data); - } - } - - /// - public async Task Write(bool value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 5) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(1); - buf.WriteByte(value ? (byte)0x80 : (byte)0); - } - - /// - public async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - // Initial bitlength byte - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Length); - - var pos = 0; - var byteLen = (value.Length + 7) / 8; - var bytePos = 0; - - while (true) - { - var endBytePos = bytePos + Math.Min(byteLen - bytePos - 1, buf.WriteSpaceLeft); - - for (; bytePos < endBytePos; bytePos++) - { - var b = 0; - b += (value[pos++] - '0') << 7; - b += (value[pos++] - '0') << 6; - b += (value[pos++] - '0') << 5; - b += (value[pos++] - '0') << 4; - b += (value[pos++] - '0') << 3; - b += (value[pos++] - '0') << 2; - b += (value[pos++] - '0') << 1; - b += (value[pos++] - '0'); - buf.WriteByte((byte)b); - } - - if (bytePos >= byteLen - 1) - break; - await buf.Flush(async, cancellationToken); - } - - if (pos < value.Length) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - var remainder = value.Length - pos; - var lastChunk = 0; - for (var i = 7; i >= 8 - remainder; i--) - lastChunk += (value[pos++] - '0') << i; - buf.WriteByte((byte)lastChunk); - } - } - - #endregion -} - -/// -/// A special handler for arrays of bit strings. -/// Differs from the standard array handlers in that it returns arrays of bool for BIT(1) and arrays -/// of BitArray otherwise (just like the scalar BitStringHandler does). -/// -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public class BitStringArrayHandler : ArrayHandler -{ - /// - public BitStringArrayHandler(PostgresType postgresType, BitStringHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) - : base(postgresType, elementHandler, arrayNullabilityMode) {} - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.ElementType == typeof(BitArray)) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ReadArray(buf, async); - - if (ArrayTypeInfo.IsList) - return (TRequestedArray)(object)await ReadList(buf, async); - } - - if (ArrayTypeInfo.ElementType == typeof(bool)) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ReadArray(buf, async); - - if (ArrayTypeInfo.IsList) - return (TRequestedArray)(object)await ReadList(buf, async); - } - - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => fieldDescription?.TypeModifier == 1 - ? await ReadArray(buf, async) - : await ReadArray(buf, async); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/BoolHandler.cs b/src/Npgsql/Internal/TypeHandlers/BoolHandler.cs deleted file mode 100644 index c33004c701..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/BoolHandler.cs +++ /dev/null @@ -1,32 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL bool data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-boolean.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. 
However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class BoolHandler : NpgsqlSimpleTypeHandler -{ - public BoolHandler(PostgresType pgType) : base(pgType) {} - - /// - public override bool Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadByte() != 0; - - /// - public override int ValidateAndGetLength(bool value, NpgsqlParameter? parameter) - => 1; - - /// - public override void Write(bool value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteByte(value ? (byte)1 : (byte)0); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs b/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs deleted file mode 100644 index e4d1a0df1a..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs +++ /dev/null @@ -1,153 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL bytea data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-binary.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class ByteaHandler : NpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler -#if !NETSTANDARD2_0 - , INpgsqlTypeHandler>, INpgsqlTypeHandler> -#endif -{ - public ByteaHandler(PostgresType pgType) : base(pgType) {} - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - var bytes = new byte[len]; - var pos = 0; - while (true) - { - var toRead = Math.Min(len - pos, buf.ReadBytesLeft); - buf.ReadBytes(bytes, pos, toRead); - pos += toRead; - if (pos == len) - break; - await buf.ReadMore(async); - } - return bytes; - } - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing ArraySegment to PostgreSQL bytea is supported, no reading."); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Reading a PostgreSQL bytea as a Stream is unsupported, use NpgsqlDataReader.GetStream() instead.."); - - int ValidateAndGetLength(int bufferLen, NpgsqlParameter? parameter) - => parameter == null || parameter.Size <= 0 || parameter.Size >= bufferLen - ? bufferLen - : parameter.Size; - - int ValidateAndGetLength(Stream stream, NpgsqlParameter? parameter) - { - if (parameter != null && parameter.Size > 0) - return parameter.Size; - - if (!stream.CanSeek) - throw new NpgsqlException("Cannot write a stream of bytes. Either provide a positive size, or a seekable stream."); - - try - { - return (int)(stream.Length - stream.Position); - } - catch (Exception ex) - { - throw new NpgsqlException("The remaining bytes in the provided Stream exceed the maximum length. The vaule may be truncated by setting NpgsqlParameter.Size.", ex); - } - } - - /// - public override int ValidateAndGetLength(byte[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Length, parameter); - - /// - public int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Count, parameter); - - /// - public int ValidateAndGetLength(Stream value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength(value, parameter); - - /// - public override Task Write(byte[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write(value, buf, 0, ValidateAndGetLength(value.Length, parameter), async, cancellationToken); - - /// - public Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value.Array is null ? Task.CompletedTask : Write(value.Array, buf, value.Offset, ValidateAndGetLength(value.Count, parameter), async, cancellationToken); - - /// - public Task Write(Stream value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write(value, buf, ValidateAndGetLength(value, parameter), async, cancellationToken); - - async Task Write(byte[] value, NpgsqlWriteBuffer buf, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - // The entire segment fits in our buffer, copy it as usual. - if (count <= buf.WriteSpaceLeft) - { - buf.WriteBytes(value, offset, count); - return; - } - - // The segment is larger than our buffer. Flush whatever is currently in the buffer and - // write the array directly to the socket. - await buf.Flush(async, cancellationToken); - await buf.DirectWrite(new ReadOnlyMemory(value, offset, count), async, cancellationToken); - } - - Task Write(Stream value, NpgsqlWriteBuffer buf, int count, bool async, CancellationToken cancellationToken = default) - => buf.WriteStreamRaw(value, count, async, cancellationToken); - -#if !NETSTANDARD2_0 - /// - public int ValidateAndGetLength(Memory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength(value.Length, parameter); - - /// - public int ValidateAndGetLength(ReadOnlyMemory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Length, parameter); - - /// - public async Task Write(ReadOnlyMemory value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (parameter != null && parameter.Size > 0 && parameter.Size < value.Length) - value = value.Slice(0, parameter.Size); - - // The entire segment fits in our buffer, copy it into the buffer as usual. - if (value.Length <= buf.WriteSpaceLeft) - { - buf.WriteBytes(value.Span); - return; - } - - // The segment is larger than our buffer. Perform a direct write, flushing whatever is currently in the buffer - // and then writing the array directly to the socket. - await buf.DirectWrite(value, async, cancellationToken); - } - - /// - public Task Write(Memory value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((ReadOnlyMemory)value, buf, lengthCache, parameter, async, cancellationToken); - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescriptioncancellationToken) - => throw new NotSupportedException("Only writing ReadOnlyMemory to PostgreSQL bytea is supported, no reading."); - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => throw new NotSupportedException("Only writing Memory to PostgreSQL bytea is supported, no reading."); -#endif -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs deleted file mode 100644 index e5f02bddbe..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs +++ /dev/null @@ -1,10 +0,0 @@ - -// Only used for value types, but can't constrain because MappedCompositeHandler isn't constrained -#nullable disable - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class ByReference -{ - public T Value; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs deleted file mode 100644 index b1b633748b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs +++ /dev/null @@ -1,62 +0,0 @@ -using System; -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -class CompositeConstructorHandler -{ - public PostgresType PostgresType { get; } - public ConstructorInfo ConstructorInfo { get; } - public CompositeParameterHandler[] Handlers { get; } - - protected CompositeConstructorHandler(PostgresType postgresType, ConstructorInfo constructorInfo, CompositeParameterHandler[] handlers) - { - PostgresType = postgresType; - ConstructorInfo = constructorInfo; - Handlers = handlers; - } - - public virtual async ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(int), async); - - var fieldCount = buffer.ReadInt32(); - if (fieldCount != Handlers.Length) - throw new InvalidOperationException($"pg_attributes contains {Handlers.Length} fields for type {PostgresType.DisplayName}, 
but {fieldCount} fields were received."); - - var args = new object?[Handlers.Length]; - foreach (var handler in Handlers) - args[handler.ParameterPosition] = await handler.Read(buffer, async); - - return (TComposite)ConstructorInfo.Invoke(args); - } - - public static CompositeConstructorHandler Create(PostgresType postgresType, ConstructorInfo constructorInfo, CompositeParameterHandler[] parameterHandlers) - { - const int maxGenericParameters = 8; - - if (parameterHandlers.Length > maxGenericParameters) - return new CompositeConstructorHandler(postgresType, constructorInfo, parameterHandlers); - - var parameterTypes = new Type[1 + maxGenericParameters]; - foreach (var parameterHandler in parameterHandlers) - parameterTypes[1 + parameterHandler.ParameterPosition] = parameterHandler.ParameterType; - - for (var parameterIndex = 1; parameterIndex < parameterTypes.Length; parameterIndex++) - parameterTypes[parameterIndex] ??= typeof(Unused); - - parameterTypes[0] = typeof(TComposite); - return (CompositeConstructorHandler)Activator.CreateInstance( - typeof(CompositeConstructorHandler<,,,,,,,,>).MakeGenericType(parameterTypes), - BindingFlags.Instance | BindingFlags.Public, - binder: null, - args: new object[] { postgresType, constructorInfo, parameterHandlers }, - culture: null)!; - } - - readonly struct Unused - { - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs deleted file mode 100644 index b7d8a7b7b0..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs +++ /dev/null @@ -1,66 +0,0 @@ -using System; -using System.Linq; -using System.Linq.Expressions; -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeConstructorHandler : 
CompositeConstructorHandler -{ - delegate TComposite CompositeConstructor(in Arguments args); - - readonly CompositeConstructor _constructor; - - public CompositeConstructorHandler(PostgresType postgresType, ConstructorInfo constructorInfo, CompositeParameterHandler[] parameterHandlers) - : base(postgresType, constructorInfo, parameterHandlers) - { - var parameter = Expression.Parameter(typeof(Arguments).MakeByRefType()); - var fields = Enumerable - .Range(1, parameterHandlers.Length) - .Select(i => Expression.Field(parameter, "Argument" + i)); - - _constructor = Expression - .Lambda(Expression.New(constructorInfo, fields), parameter) - .Compile(); - } - - public override async ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(int), async); - - var fieldCount = buffer.ReadInt32(); - if (fieldCount != Handlers.Length) - throw new InvalidOperationException($"pg_attributes contains {Handlers.Length} fields for type {PostgresType.DisplayName}, but {fieldCount} fields were received."); - - var args = default(Arguments); - - foreach (var handler in Handlers) - switch (handler.ParameterPosition) - { - case 0: args.Argument1 = await handler.Read(buffer, async); break; - case 1: args.Argument2 = await handler.Read(buffer, async); break; - case 2: args.Argument3 = await handler.Read(buffer, async); break; - case 3: args.Argument4 = await handler.Read(buffer, async); break; - case 4: args.Argument5 = await handler.Read(buffer, async); break; - case 5: args.Argument6 = await handler.Read(buffer, async); break; - case 6: args.Argument7 = await handler.Read(buffer, async); break; - case 7: args.Argument8 = await handler.Read(buffer, async); break; - } - - return _constructor(args); - } - - struct Arguments - { - public T1 Argument1; - public T2 Argument2; - public T3 Argument3; - public T4 Argument4; - public T5 Argument5; - public T6 Argument6; - public T7 Argument7; - public T8 Argument8; - } -} \ No newline at end of file diff --git 
a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs deleted file mode 100644 index 7df3267f37..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs +++ /dev/null @@ -1,281 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Linq.Expressions; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -#region Trimming warning suppressions - -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2046", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2080", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2026", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2090", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2087", Scope = "type", Target = 
"Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2055", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2077", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] - -#endregion - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed partial class CompositeHandler : NpgsqlTypeHandler, ICompositeHandler -{ - readonly TypeMapper _typeMapper; - readonly INpgsqlNameTranslator _nameTranslator; - - Func? _constructor; - CompositeConstructorHandler? _constructorHandler; - CompositeMemberHandler[] _memberHandlers = null!; - - public Type CompositeType => typeof(T); - - public CompositeHandler(PostgresCompositeType postgresType, TypeMapper typeMapper, INpgsqlNameTranslator nameTranslator) - : base(postgresType) - { - _typeMapper = typeMapper; - _nameTranslator = nameTranslator; - } - - public override ValueTask Read(NpgsqlReadBuffer buffer, int length, bool async, FieldDescription? fieldDescription = null) - { - Initialize(); - - return _constructorHandler is null - ? 
ReadUsingMemberHandlers(buffer, async) - : _constructorHandler.Read(buffer, async); - - async ValueTask ReadUsingMemberHandlers(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(int), async); - - var fieldCount = buffer.ReadInt32(); - if (fieldCount != _memberHandlers.Length) - throw new InvalidOperationException($"pg_attributes contains {_memberHandlers.Length} fields for type {PgDisplayName}, but {fieldCount} fields were received."); - - if (IsValueType.Value) - { - var composite = new ByReference { Value = _constructor!() }; - foreach (var member in _memberHandlers) - await member.Read(composite, buffer, async); - - return composite.Value; - } - else - { - var composite = _constructor!(); - foreach (var member in _memberHandlers) - await member.Read(composite, buffer, async); - - return composite; - } - } - } - - public override async Task Write(T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - Initialize(); - - if (buffer.WriteSpaceLeft < sizeof(int)) - await buffer.Flush(async, cancellationToken); - - buffer.WriteInt32(_memberHandlers.Length); - - foreach (var member in _memberHandlers) - await member.Write(value, buffer, lengthCache, async, cancellationToken); - } - - public override int ValidateAndGetLength(T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - Initialize(); - - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - // Leave empty slot for the entire composite type, and go ahead an populate the element slots - var position = lengthCache.Position; - lengthCache.Set(0); - - // number of fields + (type oid + field length) * member count - var length = sizeof(int) + sizeof(int) * 2 * _memberHandlers.Length; - foreach (var member in _memberHandlers) - length += member.ValidateAndGetLength(value, ref lengthCache); - - return lengthCache.Lengths[position] = length; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Initialize() - { - if (_memberHandlers is null) - InitializeCore(); - - void InitializeCore() - { - var pgType = (PostgresCompositeType)PostgresType; - - _memberHandlers = CreateMemberHandlers(pgType, _typeMapper, _nameTranslator); - _constructorHandler = CreateConstructorHandler(pgType, _typeMapper, _nameTranslator); - _constructor = _constructorHandler is null - ? Expression - .Lambda>(Expression.New(typeof(T))) - .Compile() - : null; - } - } - - static CompositeConstructorHandler? CreateConstructorHandler(PostgresCompositeType pgType, TypeMapper typeMapper, INpgsqlNameTranslator nameTranslator) - { - var pgFields = pgType.Fields; - var clrType = typeof(T); - - ConstructorInfo? clrDefaultConstructor = null; - - foreach (var clrConstructor in clrType.GetConstructors()) - { - var clrParameters = clrConstructor.GetParameters(); - if (clrParameters.Length != pgFields.Count) - { - if (clrParameters.Length == 0) - clrDefaultConstructor = clrConstructor; - - continue; - } - - var clrParameterHandlerCount = 0; - var clrParametersMapped = new ParameterInfo[pgFields.Count]; - - foreach (var clrParameter in clrParameters) - { - var attr = clrParameter.GetCustomAttribute(); - var name = attr?.PgName ?? (clrParameter.Name is string clrName ? 
nameTranslator.TranslateMemberName(clrName) : null); - if (name is null) - break; - - for (var pgFieldIndex = pgFields.Count - 1; pgFieldIndex >= 0; --pgFieldIndex) - { - var pgField = pgFields[pgFieldIndex]; - if (pgField.Name != name) - continue; - - if (clrParametersMapped[pgFieldIndex] != null) - throw new AmbiguousMatchException($"Multiple constructor parameters are mapped to the '{pgField.Name}' field."); - - clrParameterHandlerCount++; - clrParametersMapped[pgFieldIndex] = clrParameter; - - break; - } - } - - if (clrParameterHandlerCount < pgFields.Count) - continue; - - var clrParameterHandlers = new CompositeParameterHandler[pgFields.Count]; - for (var pgFieldIndex = 0; pgFieldIndex < pgFields.Count; ++pgFieldIndex) - { - var pgField = pgFields[pgFieldIndex]; - - if (!typeMapper.TryResolveByOID(pgField.Type.OID, out var handler)) - throw new NpgsqlException($"PostgreSQL composite type {pgType.DisplayName} has field {pgField.Type.DisplayName} with an unknown type (OID = {pgField.Type.OID})."); - - var clrParameter = clrParametersMapped[pgFieldIndex]; - var clrParameterHandlerType = typeof(CompositeParameterHandler<>) - .MakeGenericType(clrParameter.ParameterType); - - clrParameterHandlers[pgFieldIndex] = (CompositeParameterHandler)Activator.CreateInstance( - clrParameterHandlerType, - BindingFlags.Instance | BindingFlags.Public, - binder: null, - args: new object[] { handler, clrParameter }, - culture: null)!; - } - - return CompositeConstructorHandler.Create(pgType, clrConstructor, clrParameterHandlers); - } - - if (clrDefaultConstructor is null && !clrType.IsValueType) - throw new InvalidOperationException($"No parameterless constructor defined for type '{clrType}'."); - - return null; - } - - static CompositeMemberHandler[] CreateMemberHandlers(PostgresCompositeType pgType, TypeMapper typeMapper, INpgsqlNameTranslator nameTranslator) - { - var pgFields = pgType.Fields; - - var clrType = typeof(T); - var clrMemberHandlers = new 
CompositeMemberHandler[pgFields.Count]; - var clrMemberHandlerCount = 0; - var clrMemberHandlerType = IsValueType.Value - ? typeof(CompositeStructMemberHandler<,>) - : typeof(CompositeClassMemberHandler<,>); - - foreach (var clrProperty in clrType.GetProperties(BindingFlags.Instance | BindingFlags.Public)) - CreateMemberHandler(clrProperty, clrProperty.PropertyType); - - foreach (var clrField in clrType.GetFields(BindingFlags.Instance | BindingFlags.Public)) - CreateMemberHandler(clrField, clrField.FieldType); - - if (clrMemberHandlerCount != pgFields.Count) - { - var notMappedFields = string.Join(", ", clrMemberHandlers - .Select((member, memberIndex) => member == null ? $"'{pgFields[memberIndex].Name}'" : null) - .Where(member => member != null)); - throw new InvalidOperationException($"PostgreSQL composite type {pgType.DisplayName} contains fields {notMappedFields} which could not match any on CLR type {clrType.Name}"); - } - - return clrMemberHandlers; - - void CreateMemberHandler(MemberInfo clrMember, Type clrMemberType) - { - var attr = clrMember.GetCustomAttribute(); - var name = attr?.PgName ?? 
nameTranslator.TranslateMemberName(clrMember.Name); - - for (var pgFieldIndex = pgFields.Count - 1; pgFieldIndex >= 0; --pgFieldIndex) - { - var pgField = pgFields[pgFieldIndex]; - if (pgField.Name != name) - continue; - - if (clrMemberHandlers[pgFieldIndex] != null) - throw new AmbiguousMatchException($"Multiple class members are mapped to the '{pgField.Name}' field."); - - if (!typeMapper.TryResolveByOID(pgField.Type.OID, out var handler)) - throw new NpgsqlException($"PostgreSQL composite type {pgType.DisplayName} has field {pgField.Type.DisplayName} with an unknown type (OID = {pgField.Type.OID})."); - - clrMemberHandlerCount++; - clrMemberHandlers[pgFieldIndex] = (CompositeMemberHandler)Activator.CreateInstance( - clrMemberHandlerType.MakeGenericType(clrType, clrMemberType), - BindingFlags.Instance | BindingFlags.Public, - binder: null, - args: new object[] { clrMember, pgField.Type, handler }, - culture: null)!; - - break; - } - } - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs deleted file mode 100644 index 48d57e9c82..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs +++ /dev/null @@ -1,28 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -abstract class CompositeMemberHandler -{ - public MemberInfo MemberInfo { get; } - public PostgresType PostgresType { get; } - - protected CompositeMemberHandler(MemberInfo memberInfo, PostgresType postgresType) - { - MemberInfo = memberInfo; - PostgresType = postgresType; - } - - public abstract ValueTask Read(TComposite composite, NpgsqlReadBuffer buffer, bool async); - - public abstract ValueTask Read(ByReference 
composite, NpgsqlReadBuffer buffer, bool async); - - public abstract Task Write(TComposite composite, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default); - - public abstract int ValidateAndGetLength(TComposite composite, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs deleted file mode 100644 index 0593e4d67e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq.Expressions; -using System.Reflection; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeClassMemberHandler : CompositeMemberHandler - where TComposite : class -{ - delegate TMember GetMember(TComposite composite); - delegate void SetMember(TComposite composite, TMember value); - - readonly GetMember? _get; - readonly SetMember? 
_set; - readonly NpgsqlTypeHandler _handler; - - public CompositeClassMemberHandler(FieldInfo fieldInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(fieldInfo, postgresType) - { - var composite = Expression.Parameter(typeof(TComposite), "composite"); - var value = Expression.Parameter(typeof(TMember), "value"); - - _get = Expression - .Lambda(Expression.Field(composite, fieldInfo), composite) - .Compile(); - _set = Expression - .Lambda(Expression.Assign(Expression.Field(composite, fieldInfo), value), composite, value) - .Compile(); - _handler = handler; - } - - public CompositeClassMemberHandler(PropertyInfo propertyInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(propertyInfo, postgresType) - { - var getMethod = propertyInfo.GetGetMethod(); - if (getMethod != null) - _get = (GetMember)Delegate.CreateDelegate(typeof(GetMember), getMethod); - - var setMethod = propertyInfo.GetSetMethod(); - if (setMethod != null) - _set = (SetMember)Delegate.CreateDelegate(typeof(SetMember), setMethod); - - Debug.Assert(setMethod != null || getMethod != null); - - _handler = handler; - } - - public override async ValueTask Read(TComposite composite, NpgsqlReadBuffer buffer, bool async) - { - if (_set == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertySetter(typeof(TComposite), MemberInfo); - - await buffer.Ensure(sizeof(uint) + sizeof(int), async); - - var oid = buffer.ReadUInt32(); - Debug.Assert(oid == PostgresType.OID); - - var length = buffer.ReadInt32(); - if (length == -1) - return; - - var value = NullableHandler.Exists - ? await NullableHandler.ReadAsync(_handler, buffer, length, async) - : await _handler.Read(buffer, length, async); - - _set(composite, value); - } - - public override ValueTask Read(ByReference composite, NpgsqlReadBuffer buffer, bool async) - => throw new NotSupportedException(); - - public override async Task Write(TComposite composite, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? 
lengthCache, bool async, CancellationToken cancellationToken = default) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - if (buffer.WriteSpaceLeft < sizeof(int)) - await buffer.Flush(async, cancellationToken); - - buffer.WriteUInt32(PostgresType.OID); - if (NullableHandler.Exists) - await NullableHandler.WriteAsync(_handler, _get(composite), buffer, lengthCache, null, async, cancellationToken); - else - await _handler.WriteWithLength(_get(composite), buffer, lengthCache, null, async, cancellationToken); - } - - public override int ValidateAndGetLength(TComposite composite, ref NpgsqlLengthCache? lengthCache) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - var value = _get(composite); - if (value is null) - return 0; - - return NullableHandler.Exists - ? NullableHandler.ValidateAndGetLength(_handler, value, ref lengthCache, null) - : _handler.ValidateAndGetLength(value, ref lengthCache, null); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs deleted file mode 100644 index 2fa1d48ca3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs +++ /dev/null @@ -1,109 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq.Expressions; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeStructMemberHandler : CompositeMemberHandler - where TComposite : struct -{ - delegate TMember GetMember(ref TComposite composite); - delegate void SetMember(ref TComposite composite, TMember value); 
- - readonly GetMember? _get; - readonly SetMember? _set; - readonly NpgsqlTypeHandler _handler; - - public CompositeStructMemberHandler(FieldInfo fieldInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(fieldInfo, postgresType) - { - var composite = Expression.Parameter(typeof(TComposite).MakeByRefType(), "composite"); - var value = Expression.Parameter(typeof(TMember), "value"); - - _get = Expression - .Lambda(Expression.Field(composite, fieldInfo), composite) - .Compile(); - _set = Expression - .Lambda(Expression.Assign(Expression.Field(composite, fieldInfo), value), composite, value) - .Compile(); - _handler = handler; - } - - public CompositeStructMemberHandler(PropertyInfo propertyInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(propertyInfo, postgresType) - { - var getMethod = propertyInfo.GetGetMethod(); - if (getMethod != null) - _get = (GetMember)Delegate.CreateDelegate(typeof(GetMember), getMethod); - - var setMethod = propertyInfo.GetSetMethod(); - if (setMethod != null) - _set = (SetMember)Delegate.CreateDelegate(typeof(SetMember), setMethod); - - Debug.Assert(setMethod != null || getMethod != null); - - _handler = handler; - } - - public override ValueTask Read(TComposite composite, NpgsqlReadBuffer buffer, bool async) - => throw new NotSupportedException(); - - public override async ValueTask Read(ByReference composite, NpgsqlReadBuffer buffer, bool async) - { - if (_set == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertySetter(typeof(TComposite), MemberInfo); - - await buffer.Ensure(sizeof(uint) + sizeof(int), async); - - var oid = buffer.ReadUInt32(); - Debug.Assert(oid == PostgresType.OID); - - var length = buffer.ReadInt32(); - if (length == -1) - return; - - var value = NullableHandler.Exists - ? 
await NullableHandler.ReadAsync(_handler, buffer, length, async) - : await _handler.Read(buffer, length, async); - - Set(composite, value); - } - - public override async Task Write(TComposite composite, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - if (buffer.WriteSpaceLeft < sizeof(int)) - await buffer.Flush(async, cancellationToken); - - buffer.WriteUInt32(PostgresType.OID); - await (NullableHandler.Exists - ? NullableHandler.WriteAsync(_handler, _get(ref composite), buffer, lengthCache, null, async, cancellationToken) - : _handler.WriteWithLength(_get(ref composite), buffer, lengthCache, null, async, cancellationToken)); - } - - public override int ValidateAndGetLength(TComposite composite, ref NpgsqlLengthCache? lengthCache) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - var value = _get(ref composite); - if (value is null) - return 0; - - return NullableHandler.Exists - ? 
NullableHandler.ValidateAndGetLength(_handler, value, ref lengthCache, null) - : _handler.ValidateAndGetLength(value, ref lengthCache, null); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Set(ByReference composite, TMember value) - => _set!(ref composite.Value, value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs deleted file mode 100644 index f99de18bba..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System; -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -abstract class CompositeParameterHandler -{ - public NpgsqlTypeHandler Handler { get; } - public Type ParameterType { get; } - public int ParameterPosition { get; } - - public CompositeParameterHandler(NpgsqlTypeHandler handler, ParameterInfo parameterInfo) - { - Handler = handler; - ParameterType = parameterInfo.ParameterType; - ParameterPosition = parameterInfo.Position; - } - - public async ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(uint) + sizeof(int), async); - - var oid = buffer.ReadUInt32(); - var length = buffer.ReadInt32(); - if (length == -1) - return default!; - - return NullableHandler.Exists - ? 
await NullableHandler.ReadAsync(Handler, buffer, length, async) - : await Handler.Read(buffer, length, async); - } - - public abstract ValueTask Read(NpgsqlReadBuffer buffer, bool async); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs deleted file mode 100644 index 6c2d9dab8d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeParameterHandler : CompositeParameterHandler -{ - public CompositeParameterHandler(NpgsqlTypeHandler handler, ParameterInfo parameterInfo) - : base(handler, parameterInfo) { } - - public override ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - var task = Read(buffer, async); - return task.IsCompleted - ? new ValueTask(task.Result) - : AwaitTask(task); - - static async ValueTask AwaitTask(ValueTask task) => await task; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs deleted file mode 100644 index 5bb186233b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs +++ /dev/null @@ -1,11 +0,0 @@ -using System; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -interface ICompositeHandler -{ - /// - /// The CLR type mapped to the PostgreSQL composite type. 
- /// - Type CompositeType { get; } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs deleted file mode 100644 index 360cae915d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs +++ /dev/null @@ -1,6 +0,0 @@ -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -static class IsValueType -{ - public static readonly bool Value = typeof(T).IsValueType; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs deleted file mode 100644 index 42bcb93d42..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs +++ /dev/null @@ -1,131 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL date data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class DateHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - static readonly DateTime BaseValueDateTime = new(2000, 1, 1, 0, 0, 0); - - /// - /// Constructs a - /// - public DateHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - public override DateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - => buf.ReadInt32() switch - { - int.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MaxValue, - int.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MinValue, - var value => BaseValueDateTime + TimeSpan.FromDays(value) - }; - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt32(); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) => 4; - - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => 4; - - /// - public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == DateTime.MaxValue) - { - buf.WriteInt32(int.MaxValue); - return; - } - - if (value == DateTime.MinValue) - { - buf.WriteInt32(int.MinValue); - return; - } - } - - buf.WriteInt32((value - BaseValueDateTime).Days); - } - - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt32(value); - - #endregion Write - -#if NET6_0_OR_GREATER - static readonly DateOnly BaseValueDateOnly = new(2000, 1, 1); - - DateOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt32() switch - { - int.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateOnly.MaxValue, - int.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateOnly.MinValue, - var value => BaseValueDateOnly.AddDays(value) - }; - - public int ValidateAndGetLength(DateOnly value, NpgsqlParameter? 
parameter) => 4; - - public void Write(DateOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == DateOnly.MaxValue) - { - buf.WriteInt32(int.MaxValue); - return; - } - - if (value == DateOnly.MinValue) - { - buf.WriteInt32(int.MinValue); - return; - } - } - - buf.WriteInt32(value.DayNumber - BaseValueDateOnly.DayNumber); - } - - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new RangeHandler(pgRangeType, this); - - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgRangeType) - => new MultirangeHandler(pgRangeType, new RangeHandler(pgRangeType, this)); -#endif -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs deleted file mode 100644 index 8b702aad12..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs +++ /dev/null @@ -1,63 +0,0 @@ -using System; -using System.Runtime.CompilerServices; -using Npgsql.Properties; -using static Npgsql.Util.Statics; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -static class DateTimeUtils -{ - const long PostgresTimestampOffsetTicks = 630822816000000000L; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static DateTime DecodeTimestamp(long value, DateTimeKind kind) - => new(value * 10 + PostgresTimestampOffsetTicks, kind); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static long EncodeTimestamp(DateTime value) - // Rounding here would cause problems because we would round up DateTime.MaxValue - // which would make it impossible to retrieve it back from the database, so we just drop the additional precision - => (value.Ticks - PostgresTimestampOffsetTicks) / 10; - - internal static DateTime ReadDateTime(NpgsqlReadBuffer buf, DateTimeKind kind) - { - try - { - return buf.ReadInt64() 
switch - { - long.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MaxValue, - long.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MinValue, - var value => DecodeTimestamp(value, kind) - }; - } - catch (ArgumentOutOfRangeException e) - { - throw new InvalidCastException("Out of the range of DateTime (year must be between 1 and 9999)", e); - } - } - - internal static void WriteTimestamp(DateTime value, NpgsqlWriteBuffer buf) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == DateTime.MaxValue) - { - buf.WriteInt64(long.MaxValue); - return; - } - - if (value == DateTime.MinValue) - { - buf.WriteInt64(long.MinValue); - return; - } - } - - var postgresTimestamp = EncodeTimestamp(value); - buf.WriteInt64(postgresTimestamp); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs deleted file mode 100644 index 9cce23e486..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs +++ /dev/null @@ -1,70 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL date interval type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class IntervalHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - /// - /// Constructs an - /// - public IntervalHandler(PostgresType postgresType) : base(postgresType) {} - - /// - public override TimeSpan Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var microseconds = buf.ReadInt64(); - var days = buf.ReadInt32(); - var months = buf.ReadInt32(); - - if (months > 0) - throw new InvalidCastException(NpgsqlStrings.CannotReadIntervalWithMonthsAsTimeSpan); - - return new(microseconds * 10 + days * TimeSpan.TicksPerDay); - } - - NpgsqlInterval INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var ticks = buf.ReadInt64(); - var day = buf.ReadInt32(); - var month = buf.ReadInt32(); - return new NpgsqlInterval(month, day, ticks); - } - - /// - public override int ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) => 16; - - /// - public int ValidateAndGetLength(NpgsqlInterval value, NpgsqlParameter? parameter) => 16; - - /// - public override void Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var ticksInDay = value.Ticks - TimeSpan.TicksPerDay * value.Days; - - buf.WriteInt64(ticksInDay / 10); - buf.WriteInt32(value.Days); - buf.WriteInt32(0); - } - - public void Write(NpgsqlInterval value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteInt64(value.Time); - buf.WriteInt32(value.Days); - buf.WriteInt32(value.Months); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs deleted file mode 100644 index f4ec3b689b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs +++ /dev/null @@ -1,52 +0,0 @@ -using System; -using System.Data; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL time data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TimeHandler : NpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - /// - /// Constructs a . - /// - public TimeHandler(PostgresType postgresType) : base(postgresType) {} - - // PostgreSQL time resolution == 1 microsecond == 10 ticks - /// - public override TimeSpan Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadInt64() * 10); - - /// - public override int ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value.Ticks / 10); - -#if NET6_0_OR_GREATER - TimeOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => new(buf.ReadInt64() * 10); - - public int ValidateAndGetLength(TimeOnly value, NpgsqlParameter? parameter) => 8; - - public void Write(TimeOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value.Ticks / 10); -#endif -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs deleted file mode 100644 index 464c4abd01..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs +++ /dev/null @@ -1,53 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL timetz data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TimeTzHandler : NpgsqlSimpleTypeHandler -{ - // Binary Format: int64 expressing microseconds, int32 expressing timezone in seconds, negative - - /// - /// Constructs an . - /// - public TimeTzHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - public override DateTimeOffset Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. - var ticks = buf.ReadInt64() * 10; - var offset = new TimeSpan(0, 0, -buf.ReadInt32()); - return new DateTimeOffset(ticks + TimeSpan.TicksPerDay, offset); - } - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? 
parameter) => 12; - - /// - public override void Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - buf.WriteInt64(value.TimeOfDay.Ticks / 10); - buf.WriteInt32(-(int)(value.Offset.Ticks / TimeSpan.TicksPerSecond)); - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs deleted file mode 100644 index 1887318b44..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs +++ /dev/null @@ -1,62 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using static Npgsql.Util.Statics; -using static Npgsql.Internal.TypeHandlers.DateTimeHandlers.DateTimeUtils; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL timestamp data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TimestampHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - /// - /// Constructs a . - /// - public TimestampHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - public override DateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => ReadDateTime(buf, DateTimeKind.Unspecified); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt64(); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTime value, NpgsqlParameter? 
parameter) - => value.Kind != DateTimeKind.Utc || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - "Cannot write DateTime with Kind=UTC to PostgreSQL type 'timestamp without time zone', " + - "consider using 'timestamp with time zone'. " + - "Note that it's not possible to mix DateTimes with different Kinds in an array/range. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => WriteTimestamp(value, buf); - - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs deleted file mode 100644 index 66b3397ecb..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs +++ /dev/null @@ -1,143 +0,0 @@ -using System; -using System.Diagnostics; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; -using static Npgsql.Util.Statics; -using static Npgsql.Internal.TypeHandlers.DateTimeHandlers.DateTimeUtils; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL timestamptz data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class TimestampTzHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - /// - /// Constructs an . - /// - public TimestampTzHandler(PostgresType postgresType) : base(postgresType) {} - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new RangeHandler(pgRangeType, this); - - #region Read - - /// - public override DateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var dateTime = ReadDateTime(buf, DateTimeKind.Utc); - return LegacyTimestampBehavior && (DisableDateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue) - ? dateTime.ToLocalTime() - : dateTime; - } - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - try - { - var value = buf.ReadInt64(); - switch (value) - { - case long.MaxValue: - return DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTimeOffset.MaxValue; - case long.MinValue: - return DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTimeOffset.MinValue; - default: - var dateTime = DecodeTimestamp(value, DateTimeKind.Utc); - return LegacyTimestampBehavior ? dateTime.ToLocalTime() : dateTime; - } - } - catch (ArgumentOutOfRangeException e) - { - throw new InvalidCastException("Out of the range of DateTime (year must be between 1 and 9999)", e); - } - } - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt64(); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => value.Kind == DateTimeKind.Utc || - value == DateTime.MinValue || // Allowed since this is default(DateTime) - sent without any timezone conversion. 
- value == DateTime.MaxValue && !DisableDateTimeInfinityConversions || - LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write DateTime with Kind={value.Kind} to PostgreSQL type 'timestamp with time zone', only UTC is supported. " + - "Note that it's not possible to mix DateTimes with different Kinds in an array/range. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - /// - public int ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => value.Offset == TimeSpan.Zero || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write DateTimeOffset with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', " + - "only offset 0 (UTC) is supported. " + - "Note that it's not possible to mix DateTimes with different Kinds in an array/range. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (LegacyTimestampBehavior) - { - switch (value.Kind) - { - case DateTimeKind.Unspecified: - case DateTimeKind.Utc: - break; - case DateTimeKind.Local: - value = value.ToUniversalTime(); - break; - default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {value.Kind} of enum {nameof(DateTimeKind)}. Please file a bug."); - } - } - else - Debug.Assert(value.Kind == DateTimeKind.Utc || value == DateTime.MinValue || value == DateTime.MaxValue); - - WriteTimestamp(value, buf); - } - - /// - public void Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - if (LegacyTimestampBehavior) - value = value.ToUniversalTime(); - - Debug.Assert(value.Offset == TimeSpan.Zero); - - WriteTimestamp(value.DateTime, buf); - } - - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/EnumHandler.cs b/src/Npgsql/Internal/TypeHandlers/EnumHandler.cs deleted file mode 100644 index 2604563790..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/EnumHandler.cs +++ /dev/null @@ -1,74 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Linq; -using System.Reflection; -using System.Text; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Interface implemented by all concrete handlers which handle enums -/// -interface IEnumHandler -{ - /// - /// The CLR enum type mapped to the PostgreSQL enum - /// - Type EnumType { get; } -} - -sealed partial class EnumHandler : NpgsqlSimpleTypeHandler, IEnumHandler where TEnum : struct, Enum -{ - readonly Dictionary _enumToLabel; - readonly Dictionary _labelToEnum; - - public Type EnumType => typeof(TEnum); - - #region Construction - - internal EnumHandler(PostgresEnumType postgresType, Dictionary enumToLabel, Dictionary labelToEnum) - : base(postgresType) - { - Debug.Assert(typeof(TEnum).GetTypeInfo().IsEnum, "EnumHandler instantiated for non-enum type"); - _enumToLabel = enumToLabel; - _labelToEnum = labelToEnum; - } - - #endregion - - #region Read - - public override TEnum Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - { - var str = buf.ReadString(len); - var success = _labelToEnum.TryGetValue(str, out var value); - - if (!success) - throw new InvalidCastException($"Received enum value '{str}' from database which wasn't found on enum {typeof(TEnum)}"); - - return value; - } - - #endregion - - #region Write - - public override int ValidateAndGetLength(TEnum value, NpgsqlParameter? parameter) - => _enumToLabel.TryGetValue(value, out var str) - ? Encoding.UTF8.GetByteCount(str) - : throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); - - public override void Write(TEnum value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!_enumToLabel.TryGetValue(value, out var str)) - throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); - buf.WriteString(str); - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs b/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs deleted file mode 100644 index 1fefb0f598..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs +++ /dev/null @@ -1,291 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -// TODO: Need to work on the nullability here -#nullable disable -#pragma warning disable CS8632 -#pragma warning disable RS0041 - -namespace Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; - -/// -/// A type handler for the PostgreSQL tsquery data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. 
However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TsQueryHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler -{ - // 1 (type) + 1 (weight) + 1 (is prefix search) + 2046 (max str len) + 1 (null terminator) - const int MaxSingleTokenBytes = 2050; - - public TsQueryHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numTokens = buf.ReadInt32(); - if (numTokens == 0) - return new NpgsqlTsQueryEmpty(); - - NpgsqlTsQuery? value = null; - var nodes = new Stack>(); - len -= 4; - - for (var tokenPos = 0; tokenPos < numTokens; tokenPos++) - { - await buf.Ensure(Math.Min(len, MaxSingleTokenBytes), async); - var readPos = buf.ReadPosition; - - var isOper = buf.ReadByte() == 2; - if (isOper) - { - var operKind = (NpgsqlTsQuery.NodeKind)buf.ReadByte(); - if (operKind == NpgsqlTsQuery.NodeKind.Not) - { - var node = new NpgsqlTsQueryNot(null); - InsertInTree(node, nodes, ref value); - nodes.Push(new Tuple(node, 0)); - } - else - { - var node = operKind switch - { - NpgsqlTsQuery.NodeKind.And => (NpgsqlTsQuery)new NpgsqlTsQueryAnd(null, null), - NpgsqlTsQuery.NodeKind.Or => new NpgsqlTsQueryOr(null, null), - NpgsqlTsQuery.NodeKind.Phrase => new NpgsqlTsQueryFollowedBy(null, buf.ReadInt16(), null), - _ => throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {operKind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. 
Please file a bug.") - }; - - InsertInTree(node, nodes, ref value); - - nodes.Push(new Tuple(node, 1)); - nodes.Push(new Tuple(node, 2)); - } - } - else - { - var weight = (NpgsqlTsQueryLexeme.Weight)buf.ReadByte(); - var prefix = buf.ReadByte() != 0; - var str = buf.ReadNullTerminatedString(); - InsertInTree(new NpgsqlTsQueryLexeme(str, weight, prefix), nodes, ref value); - } - - len -= buf.ReadPosition - readPos; - } - - if (nodes.Count != 0) - throw new InvalidOperationException("Internal Npgsql bug, please report."); - - return value!; - - static void InsertInTree(NpgsqlTsQuery node, Stack> nodes, ref NpgsqlTsQuery? value) - { - if (nodes.Count == 0) - value = node; - else - { - var parent = nodes.Pop(); - if (parent.Item2 == 0) - ((NpgsqlTsQueryNot)parent.Item1).Child = node; - else if (parent.Item2 == 1) - ((NpgsqlTsQueryBinOp)parent.Item1).Left = node; - else - ((NpgsqlTsQueryBinOp)parent.Item1).Right = node; - } - } - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryEmpty)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryLexeme)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryNot)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryAnd)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => (NpgsqlTsQueryOr)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryFollowedBy)await Read(buf, len, async, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(NpgsqlTsQuery value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Kind == NpgsqlTsQuery.NodeKind.Empty - ? 4 - : 4 + GetNodeLength(value); - - int GetNodeLength(NpgsqlTsQuery node) - { - // TODO: Figure out the nullability strategy here - switch (node.Kind) - { - case NpgsqlTsQuery.NodeKind.Lexeme: - var strLen = Encoding.UTF8.GetByteCount(((NpgsqlTsQueryLexeme)node).Text); - if (strLen > 2046) - throw new InvalidCastException("Lexeme text too long. Must be at most 2046 bytes in UTF8."); - return 4 + strLen; - case NpgsqlTsQuery.NodeKind.And: - case NpgsqlTsQuery.NodeKind.Or: - return 2 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right); - case NpgsqlTsQuery.NodeKind.Phrase: - // 2 additional bytes for uint16 phrase operator "distance" field. - return 4 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right); - case NpgsqlTsQuery.NodeKind.Not: - return 2 + GetNodeLength(((NpgsqlTsQueryNot)node).Child); - case NpgsqlTsQuery.NodeKind.Empty: - throw new InvalidOperationException("Empty tsquery nodes must be top-level"); - default: - throw new InvalidOperationException("Illegal node kind: " + node.Kind); - } - } - - /// - public override async Task Write(NpgsqlTsQuery query, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var numTokens = GetTokenCount(query); - - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(numTokens); - - if (numTokens == 0) - return; - - await WriteCore(query, buf, async, cancellationToken); - - static async Task WriteCore(NpgsqlTsQuery node, NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(node.Kind == NpgsqlTsQuery.NodeKind.Lexeme ? (byte)1 : (byte)2); - - if (node.Kind == NpgsqlTsQuery.NodeKind.Lexeme) - { - if (buf.WriteSpaceLeft < MaxSingleTokenBytes) - await buf.Flush(async, cancellationToken); - - var lexemeNode = (NpgsqlTsQueryLexeme)node; - buf.WriteByte((byte)lexemeNode.Weights); - buf.WriteByte(lexemeNode.IsPrefixSearch ? (byte)1 : (byte)0); - buf.WriteString(lexemeNode.Text); - buf.WriteByte(0); - return; - } - - buf.WriteByte((byte)node.Kind); - if (node.Kind == NpgsqlTsQuery.NodeKind.Not) - { - await WriteCore(((NpgsqlTsQueryNot)node).Child, buf, async, cancellationToken); - return; - } - - if (node.Kind == NpgsqlTsQuery.NodeKind.Phrase) - buf.WriteInt16(((NpgsqlTsQueryFollowedBy)node).Distance); - - await WriteCore(((NpgsqlTsQueryBinOp)node).Right, buf, async, cancellationToken); - await WriteCore(((NpgsqlTsQueryBinOp)node).Left, buf, async, cancellationToken); - } - } - - int GetTokenCount(NpgsqlTsQuery node) - { - switch (node.Kind) - { - case NpgsqlTsQuery.NodeKind.Lexeme: - return 1; - case NpgsqlTsQuery.NodeKind.And: - case NpgsqlTsQuery.NodeKind.Or: - case NpgsqlTsQuery.NodeKind.Phrase: - return 1 + GetTokenCount(((NpgsqlTsQueryBinOp)node).Left) + GetTokenCount(((NpgsqlTsQueryBinOp)node).Right); - case NpgsqlTsQuery.NodeKind.Not: - return 1 + GetTokenCount(((NpgsqlTsQueryNot)node).Child); - case NpgsqlTsQuery.NodeKind.Empty: - return 0; - } - return -1; - } - - /// - public int 
ValidateAndGetLength(NpgsqlTsQueryOr value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryAnd value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryNot value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryLexeme value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryEmpty value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryFollowedBy value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public Task Write(NpgsqlTsQueryOr value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryAnd value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryNot value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryLexeme value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryEmpty value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write( - NpgsqlTsQueryFollowedBy value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs deleted file mode 100644 index 141e566fd1..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs +++ /dev/null @@ -1,97 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; - -/// -/// A type handler for the PostgreSQL tsvector data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. 
However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TsVectorHandler : NpgsqlTypeHandler -{ - // 2561 = 2046 (max length lexeme string) + (1) null terminator + - // 2 (num_pos) + sizeof(int16) * 256 (max_num_pos (positions/wegihts)) - const int MaxSingleLexemeBytes = 2561; - - public TsVectorHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numLexemes = buf.ReadInt32(); - len -= 4; - - var lexemes = new List(); - for (var lexemePos = 0; lexemePos < numLexemes; lexemePos++) - { - await buf.Ensure(Math.Min(len, MaxSingleLexemeBytes), async); - var posBefore = buf.ReadPosition; - - List? positions = null; - - var lexemeString = buf.ReadNullTerminatedString(); - int numPositions = buf.ReadInt16(); - for (var i = 0; i < numPositions; i++) - { - var wordEntryPos = buf.ReadInt16(); - if (positions == null) - positions = new List(); - positions.Add(new NpgsqlTsVector.Lexeme.WordEntryPos(wordEntryPos)); - } - - lexemes.Add(new NpgsqlTsVector.Lexeme(lexemeString, positions, true)); - - len -= buf.ReadPosition - posBefore; - } - - return new NpgsqlTsVector(lexemes, true); - } - - #endregion Read - - #region Write - - // TODO: Implement length cache - /// - public override int ValidateAndGetLength(NpgsqlTsVector value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => 4 + value.Sum(l => Encoding.UTF8.GetByteCount(l.Text) + 1 + 2 + l.Count * 2); - - /// - public override async Task Write(NpgsqlTsVector vector, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(vector.Count); - - foreach (var lexeme in vector) - { - if (buf.WriteSpaceLeft < MaxSingleLexemeBytes) - await buf.Flush(async, cancellationToken); - - buf.WriteString(lexeme.Text); - buf.WriteByte(0); - buf.WriteInt16(lexeme.Count); - for (var i = 0; i < lexeme.Count; i++) - buf.WriteInt16(lexeme[i].Value); - } - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs deleted file mode 100644 index 6ff333f47e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs +++ /dev/null @@ -1,41 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL box data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class BoxHandler : NpgsqlSimpleTypeHandler -{ - public BoxHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlBox Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new( - new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble()), - new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble()) - ); - - /// - public override int ValidateAndGetLength(NpgsqlBox value, NpgsqlParameter? parameter) - => 32; - - /// - public override void Write(NpgsqlBox value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.Right); - buf.WriteDouble(value.Top); - buf.WriteDouble(value.Left); - buf.WriteDouble(value.Bottom); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs deleted file mode 100644 index b450177cd3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs +++ /dev/null @@ -1,37 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL circle data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class CircleHandler : NpgsqlSimpleTypeHandler -{ - public CircleHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlCircle Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlCircle value, NpgsqlParameter? parameter) - => 24; - - /// - public override void Write(NpgsqlCircle value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.X); - buf.WriteDouble(value.Y); - buf.WriteDouble(value.Radius); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs deleted file mode 100644 index 8b16b68a67..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs +++ /dev/null @@ -1,37 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL line data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class LineHandler : NpgsqlSimpleTypeHandler -{ - public LineHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlLine Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlLine value, NpgsqlParameter? parameter) - => 24; - - /// - public override void Write(NpgsqlLine value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.A); - buf.WriteDouble(value.B); - buf.WriteDouble(value.C); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs deleted file mode 100644 index f34083602f..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs +++ /dev/null @@ -1,38 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL lseg data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class LineSegmentHandler : NpgsqlSimpleTypeHandler -{ - public LineSegmentHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlLSeg Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlLSeg value, NpgsqlParameter? parameter) - => 32; - - /// - public override void Write(NpgsqlLSeg value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.Start.X); - buf.WriteDouble(value.Start.Y); - buf.WriteDouble(value.End.X); - buf.WriteDouble(value.End.Y); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs deleted file mode 100644 index 4b7aa4c8b5..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs +++ /dev/null @@ -1,74 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL path data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class PathHandler : NpgsqlTypeHandler -{ - public PathHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(5, async); - var open = buf.ReadByte() switch - { - 1 => false, - 0 => true, - _ => throw new Exception("Error decoding binary geometric path: bad open byte") - }; - - var numPoints = buf.ReadInt32(); - var result = new NpgsqlPath(numPoints, open); - for (var i = 0; i < numPoints; i++) - { - await buf.Ensure(16, async); - result.Add(new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble())); - } - return result; - } - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(NpgsqlPath value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) - => 5 + value.Count * 16; - - /// - public override async Task Write(NpgsqlPath value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 5) - await buf.Flush(async, cancellationToken); - buf.WriteByte((byte)(value.Open ? 0 : 1)); - buf.WriteInt32(value.Count); - - foreach (var p in value) - { - if (buf.WriteSpaceLeft < 16) - await buf.Flush(async, cancellationToken); - buf.WriteDouble(p.X); - buf.WriteDouble(p.Y); - } - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs deleted file mode 100644 index d02bd67ec8..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs +++ /dev/null @@ -1,36 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL point data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class PointHandler : NpgsqlSimpleTypeHandler -{ - public PointHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlPoint Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlPoint value, NpgsqlParameter? 
parameter) - => 16; - - /// - public override void Write(NpgsqlPoint value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - buf.WriteDouble(value.X); - buf.WriteDouble(value.Y); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs deleted file mode 100644 index 004bd3ebbc..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs +++ /dev/null @@ -1,65 +0,0 @@ -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL polygon data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class PolygonHandler : NpgsqlTypeHandler -{ - public PolygonHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numPoints = buf.ReadInt32(); - var result = new NpgsqlPolygon(numPoints); - for (var i = 0; i < numPoints; i++) - { - await buf.Ensure(16, async); - result.Add(new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble())); - } - return result; - } - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(NpgsqlPolygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => 4 + value.Count * 16; - - /// - public override async Task Write(NpgsqlPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Count); - - foreach (var p in value) - { - if (buf.WriteSpaceLeft < 16) - await buf.Flush(async, cancellationToken); - buf.WriteDouble(p.X); - buf.WriteDouble(p.Y); - } - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs b/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs deleted file mode 100644 index 57c10a2fbc..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs +++ /dev/null @@ -1,193 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -#if NETCOREAPP1_0_OR_GREATER -using System.Collections.Immutable; -#endif - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL hstore extension data type, which stores sets of key/value pairs within a -/// single PostgreSQL value. -/// -/// -/// See https://www.postgresql.org/docs/current/hstore.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public class HstoreHandler : - NpgsqlTypeHandler>, - INpgsqlTypeHandler> -#if NETCOREAPP1_0_OR_GREATER - , INpgsqlTypeHandler> -#endif -{ - /// - /// The text handler to which we delegate encoding/decoding of the actual strings - /// - readonly TextHandler _textHandler; - - internal HstoreHandler(PostgresType postgresType, TextHandler textHandler) - : base(postgresType) - => _textHandler = textHandler; - - #region Write - - /// - public int ValidateAndGetLength(IDictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - // Leave empty slot for the entire hstore length, and go ahead an populate the individual string slots - var pos = lengthCache.Position; - lengthCache.Set(0); - - var totalLen = 4; // Number of key-value pairs - foreach (var kv in value) - { - totalLen += 8; // Key length + value length - if (kv.Key == null) - throw new FormatException("HSTORE doesn't support null keys"); - totalLen += _textHandler.ValidateAndGetLength(kv.Key, ref lengthCache, null); - if (kv.Value != null) - totalLen += _textHandler.ValidateAndGetLength(kv.Value!, ref lengthCache, null); - } - - return lengthCache.Lengths[pos] = totalLen; - } - - /// - public override int ValidateAndGetLength(Dictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - /// - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => value switch - { -#if NETCOREAPP1_0_OR_GREATER - ImmutableDictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), -#endif - Dictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - IDictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type HstoreHandler") - }; - - /// - public override Task WriteObjectWithLength( - object? value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => value switch - { -#if NETCOREAPP1_0_OR_GREATER - ImmutableDictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), -#endif - Dictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - IDictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type BoolHandler") - }; - - /// - public async Task Write(IDictionary value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Count); - if (value.Count == 0) - return; - - foreach (var kv in value) - { - await _textHandler.WriteWithLength(kv.Key, buf, lengthCache, parameter, async, cancellationToken); - await _textHandler.WriteWithLength(kv.Value, buf, lengthCache, parameter, async, cancellationToken); - } - } - - /// - public override Task Write(Dictionary value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write(value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion - - #region Read - - async ValueTask ReadInto(T dictionary, int numElements, NpgsqlReadBuffer buf, bool async) - where T : IDictionary - { - for (var i = 0; i < numElements; i++) - { - await buf.Ensure(4, async); - var keyLen = buf.ReadInt32(); - Debug.Assert(keyLen != -1); - var key = await _textHandler.Read(buf, keyLen, async); - - await buf.Ensure(4, async); - var valueLen = buf.ReadInt32(); - - dictionary[key] = valueLen == -1 - ? null - : await _textHandler.Read(buf, valueLen, async); - } - return dictionary; - } - - /// - public override async ValueTask> Read(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numElements = buf.ReadInt32(); - return await ReadInto(new Dictionary(numElements), numElements, buf, async); - } - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => new(Read(buf, len, async, fieldDescription).Result); - - #endregion - -#if NETCOREAPP1_0_OR_GREATER - #region ImmutableDictionary - - /// - public int ValidateAndGetLength( - ImmutableDictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength((IDictionary)value, ref lengthCache, parameter); - - /// - public Task Write(ImmutableDictionary value, - NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((IDictionary)value, buf, lengthCache, parameter, async, cancellationToken); - - async ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4, async); - var numElements = buf.ReadInt32(); - return (await ReadInto(ImmutableDictionary.Empty.ToBuilder(), numElements, buf, async)) - .ToImmutable(); - } - - #endregion -#endif -} diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs deleted file mode 100644 index 95fae5dcb9..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandlers.NumericHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -/// -/// An int2vector is simply a regular array of shorts, with the sole exception that its lower bound must -/// be 0 (we send 1 for regular arrays). 
-/// -sealed class Int2VectorHandler : ArrayHandler -{ - public Int2VectorHandler(PostgresType arrayPostgresType, PostgresType postgresShortType) - : base(arrayPostgresType, new Int16Handler(postgresShortType), ArrayNullabilityMode.Never, 0) { } - - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler>(pgArrayType, this, arrayNullabilityMode); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs deleted file mode 100644 index 2131cc16c8..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs +++ /dev/null @@ -1,87 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -/// -/// A type handler for the PostgreSQL "char" type, used only internally. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-character.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class InternalCharHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public InternalCharHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override char Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => (char)buf.ReadByte(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => buf.ReadByte(); - - short INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadByte(); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadByte(); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadByte(); - - #endregion - - #region Write - - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 1; - - /// - public override int ValidateAndGetLength(char value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public int ValidateAndGetLength(short value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public override void Write(char value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte((byte)value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte(value); - /// - public void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte((byte)value); - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte((byte)value); - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteByte((byte)value); - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs deleted file mode 100644 index d17f069871..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandlers.NumericHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -/// -/// An OIDVector is simply a regular array of uints, with the sole exception that its lower bound must -/// be 0 (we send 1 for regular arrays). -/// -sealed class OIDVectorHandler : ArrayHandler -{ - public OIDVectorHandler(PostgresType oidvectorType, PostgresType oidType) - : base(oidvectorType, new UInt32Handler(oidType), ArrayNullabilityMode.Never, 0) { } - - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler>(pgArrayType, this, arrayNullabilityMode); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs deleted file mode 100644 index 75e85ab3e6..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs +++ /dev/null @@ -1,31 +0,0 @@ -using System.Diagnostics; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -sealed partial class PgLsnHandler : NpgsqlSimpleTypeHandler -{ - public PgLsnHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - public override NpgsqlLogSequenceNumber Read(NpgsqlReadBuffer buf, 
int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(len == 8); - return new NpgsqlLogSequenceNumber(buf.ReadUInt64()); - } - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(NpgsqlLogSequenceNumber value, NpgsqlParameter? parameter) => 8; - - public override void Write(NpgsqlLogSequenceNumber value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteUInt64((ulong)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs deleted file mode 100644 index 0148fc1071..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs +++ /dev/null @@ -1,39 +0,0 @@ -using System.Diagnostics; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -sealed partial class TidHandler : NpgsqlSimpleTypeHandler -{ - public TidHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - public override NpgsqlTid Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(len == 6); - - var blockNumber = buf.ReadUInt32(); - var offsetNumber = buf.ReadUInt16(); - - return new NpgsqlTid(blockNumber, offsetNumber); - } - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(NpgsqlTid value, NpgsqlParameter? parameter) - => 6; - - public override void Write(NpgsqlTid value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteUInt32(value.BlockNumber); - buf.WriteUInt16(value.OffsetNumber); - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs deleted file mode 100644 index 6ccd11fc0a..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs +++ /dev/null @@ -1,240 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.IO; -using System.Text; -using System.Text.Json; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL json and jsonb data type. -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-json.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public class JsonHandler : NpgsqlTypeHandler, ITextReaderHandler -{ - readonly JsonSerializerOptions _serializerOptions; - readonly TextHandler _textHandler; - readonly bool _isJsonb; - readonly int _headerLen; - - /// - /// Prepended to the string in the wire encoding - /// - const byte JsonbProtocolVersion = 1; - - static readonly JsonSerializerOptions DefaultSerializerOptions = new(); - - /// - public JsonHandler(PostgresType postgresType, Encoding encoding, bool isJsonb, JsonSerializerOptions? serializerOptions = null) - : base(postgresType) - { - _serializerOptions = serializerOptions ?? DefaultSerializerOptions; - _isJsonb = isJsonb; - _headerLen = isJsonb ? 
1 : 0; - _textHandler = new TextHandler(postgresType, encoding); - } - - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (typeof(TAny) == typeof(string) || - typeof(TAny) == typeof(char[]) || - typeof(TAny) == typeof(ArraySegment) || - typeof(TAny) == typeof(char) || - typeof(TAny) == typeof(byte[])) - { - return _textHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen; - } - - if (typeof(TAny) == typeof(JsonDocument)) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var data = SerializeJsonDocument((JsonDocument)(object)value!); - if (parameter != null) - parameter.ConvertedValue = data; - return lengthCache.Set(data.Length + _headerLen); - } - - // User POCO, need to serialize. At least internally ArrayPool buffers are used... - var s = JsonSerializer.Serialize(value, _serializerOptions); - if (parameter != null) - parameter.ConvertedValue = s; - - return _textHandler.ValidateAndGetLength(s, ref lengthCache, parameter) + _headerLen; - } - - /// - protected override async Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var spaceRequired = _isJsonb ? 
5 : 4; - - if (buf.WriteSpaceLeft < spaceRequired) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - - if (_isJsonb) - buf.WriteByte(JsonbProtocolVersion); - - if (typeof(TAny) == typeof(string)) - await _textHandler.Write((string)(object)value!, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(char[])) - await _textHandler.Write((char[])(object)value!, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(ArraySegment)) - await _textHandler.Write((ArraySegment)(object)value!, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(char)) - await _textHandler.Write((char)(object)value!, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(byte[])) - await _textHandler.Write((byte[])(object)value!, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(JsonDocument)) - { - var data = parameter?.ConvertedValue != null - ? (byte[])parameter.ConvertedValue - : SerializeJsonDocument((JsonDocument)(object)value!); - await buf.WriteBytesRaw(data, async, cancellationToken); - } - else - { - // User POCO, read serialized representation from the validation phase - var s = parameter?.ConvertedValue != null - ? (string)parameter.ConvertedValue - : JsonSerializer.Serialize(value!, value!.GetType(), _serializerOptions); - - await _textHandler.Write(s, buf, lengthCache, parameter, async, cancellationToken); - } - } - - /// - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - - /// - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (_isJsonb) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - buf.WriteByte(JsonbProtocolVersion); - } - - await _textHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - /// - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - string s => ValidateAndGetLength(s, ref lengthCache, parameter), - char[] s => ValidateAndGetLength(s, ref lengthCache, parameter), - ArraySegment s => ValidateAndGetLength(s, ref lengthCache, parameter), - char s => ValidateAndGetLength(s, ref lengthCache, parameter), - byte[] s => ValidateAndGetLength(s, ref lengthCache, parameter), - JsonDocument jsonDocument => ValidateAndGetLength(jsonDocument, ref lengthCache, parameter), - _ => ValidateAndGetLength(value, ref lengthCache, parameter) - }; - - /// - public override async Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - // We call into WriteWithLength below, which assumes it as at least enough write space for the length - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - await (value switch - { - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - string s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - char[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - ArraySegment s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - char s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - byte[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), - _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), - }); - } - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? 
fieldDescription = null) - { - if (_isJsonb) - { - await buf.Ensure(1, async); - var version = buf.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NotSupportedException($"Don't know how to decode JSONB with wire format {version}, your connection is now broken"); - byteLen--; - } - - if (typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[])) - { - return await _textHandler.Read(buf, byteLen, async, fieldDescription); - } - - // JsonDocument is a view over its provided buffer, so we can't return one over our internal buffer (see #2811), so we deserialize - // a string and get a JsonDocument from that. #2818 tracks improving this. - if (typeof(T) == typeof(JsonDocument)) - return (T)(object)JsonDocument.Parse(await _textHandler.Read(buf, byteLen, async, fieldDescription)); - - // User POCO - if (buf.ReadBytesLeft >= byteLen) - return JsonSerializer.Deserialize(buf.ReadSpan(byteLen), _serializerOptions)!; - -#if NET6_0_OR_GREATER - return (async - ? await JsonSerializer.DeserializeAsync(buf.GetStream(byteLen, canSeek: false), _serializerOptions) - : JsonSerializer.Deserialize(buf.GetStream(byteLen, canSeek: false), _serializerOptions))!; -#else - return JsonSerializer.Deserialize(await _textHandler.Read(buf, byteLen, async, fieldDescription), _serializerOptions)!; -#endif - } - - /// - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => ReadCustom(buf, len, async, fieldDescription); - - /// - public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - if (_isJsonb) - { - var version = stream.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NpgsqlException($"Don't know how to decode jsonb with wire format {version}, your connection is now broken"); - } - - return _textHandler.GetTextReader(stream, buffer); - } - - byte[] SerializeJsonDocument(JsonDocument document) - { - // TODO: Writing is currently really inefficient - please don't criticize :) - // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). - using var stream = new MemoryStream(); - using var writer = new Utf8JsonWriter(stream); - document.WriteTo(writer); - writer.Flush(); - return stream.ToArray(); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs deleted file mode 100644 index 7b2735fcd3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs +++ /dev/null @@ -1,74 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL jsonpath data type. -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-json.html#DATATYPE-JSONPATH. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class JsonPathHandler : NpgsqlTypeHandler, ITextReaderHandler -{ - readonly TextHandler _textHandler; - - /// - /// Prepended to the string in the wire encoding - /// - const byte JsonPathVersion = 1; - - /// - protected internal JsonPathHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType) - => _textHandler = new TextHandler(postgresType, encoding); - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != JsonPathVersion) - throw new NotSupportedException($"Don't know how to decode JSONPATH with wire format {version}, your connection is now broken"); - - return await _textHandler.Read(buf, len - 1, async, fieldDescription); - } - - /// - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - 1 + _textHandler.ValidateAndGetLength(value, ref lengthCache, parameter); - - /// - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(JsonPathVersion); - - await _textHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - /// - public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != JsonPathVersion) - throw new NotSupportedException($"Don't know how to decode JSONPATH with wire format {version}, your connection is now broken"); - - return _textHandler.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs b/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs deleted file mode 100644 index 9f73a4fb97..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs +++ /dev/null @@ -1,90 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.LTreeHandlers; - -/// -/// LQuery binary encoding is a simple UTF8 string, but prepended with a version number. -/// -public class LQueryHandler : TextHandler -{ - /// - /// Prepended to the string in the wire encoding - /// - const byte LQueryProtocolVersion = 1; - - internal override bool PreferTextWrite => false; - - protected internal LQueryHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType, encoding) {} - - #region Write - - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Read - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != LQueryProtocolVersion) - throw new NotSupportedException($"Don't know how to decode lquery with wire format {version}, your connection is now broken"); - - return await base.Read(buf, len - 1, async, fieldDescription); - } - - #endregion - - public override TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != LQueryProtocolVersion) - throw new NpgsqlException($"Don't know how to decode lquery with wire format {version}, your connection is now broken"); - - return base.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs b/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs deleted file mode 100644 index 4f43266d8f..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs +++ /dev/null @@ -1,90 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.LTreeHandlers; - -/// -/// Ltree binary encoding is a simple UTF8 string, but prepended with a version number. -/// -public class LTreeHandler : TextHandler -{ - /// - /// Prepended to the string in the wire encoding - /// - const byte LtreeProtocolVersion = 1; - - internal override bool PreferTextWrite => false; - - protected internal LTreeHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType, encoding) {} - - #region Write - - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LtreeProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LtreeProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LtreeProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Read - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != LtreeProtocolVersion) - throw new NotSupportedException($"Don't know how to decode ltree with wire format {version}, your connection is now broken"); - - return await base.Read(buf, len - 1, async, fieldDescription); - } - - #endregion - - public override TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != LtreeProtocolVersion) - throw new NpgsqlException($"Don't know how to decode ltree with wire format {version}, your connection is now broken"); - - return base.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs b/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs deleted file mode 100644 index dcde2a1d73..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs +++ /dev/null @@ -1,93 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.LTreeHandlers; - -/// -/// LTxtQuery binary encoding is a simple UTF8 string, but prepended with a version number. -/// -public class LTxtQueryHandler : TextHandler -{ - /// - /// Prepended to the string in the wire encoding - /// - const byte LTxtQueryProtocolVersion = 1; - - internal override bool PreferTextWrite => false; - - protected internal LTxtQueryHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType, encoding) {} - - #region Write - - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - - public override int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - - public override int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LTxtQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LTxtQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LTxtQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Read - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != LTxtQueryProtocolVersion) - throw new NotSupportedException($"Don't know how to decode ltxtquery with wire format {version}, your connection is now broken"); - - return await base.Read(buf, len - 1, async, fieldDescription); - } - - #endregion - - public override TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != LTxtQueryProtocolVersion) - throw new NpgsqlException($"Don't know how to decode ltxtquery with wire format {version}, your connection is now broken"); - - return base.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs deleted file mode 100644 index c6b68096ab..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs +++ /dev/null @@ -1,192 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -public partial class MultirangeHandler : NpgsqlTypeHandler[]>, - INpgsqlTypeHandler>> -{ - /// - /// The type handler for the range that this multirange type holds - /// - protected RangeHandler RangeHandler { get; } - - /// - public MultirangeHandler(PostgresMultirangeType pgMultirangeType, RangeHandler rangeHandler) - : base(pgMultirangeType) - => RangeHandler = rangeHandler; - - public override ValueTask[]> Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - protected async ValueTask[]> ReadMultirangeArray( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new NpgsqlRange[numRanges]; - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange[i] = await RangeHandler.ReadRange(buf, rangeLen, async, fieldDescription); - } - - return multirange; - } - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - protected async ValueTask>> ReadMultirangeList( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new List>(numRanges); - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange.Add(await RangeHandler.ReadRange(buf, rangeLen, async, fieldDescription)); - } - - return multirange; - } - - public override int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - protected int ValidateAndGetLengthMultirange( - IList> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - lengthCache.Set(0); - - var sum = 4 + 4 * value.Count; - for (var i = 0; i < value.Count; i++) - sum += RangeHandler.ValidateAndGetLength(value[i], ref lengthCache, parameter); - - lengthCache.Lengths[pos] = sum; - return sum; - } - - public override Task Write( - NpgsqlRange[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write( - List> value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public async Task WriteMultirange( - IList> value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Count); - - for (var i = 0; i < value.Count; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } -} - -public class MultirangeHandler : MultirangeHandler, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>> -{ - /// - public MultirangeHandler(PostgresMultirangeType pgMultirangeType, RangeHandler rangeHandler) - : base(pgMultirangeType, rangeHandler) {} - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public Task Write( - List> value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write( - NpgsqlRange[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => value switch - { - NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - List> converted => ((INpgsqlTypeHandler>>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - List> converted => ((INpgsqlTypeHandler>>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - NpgsqlRange[] converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - NpgsqlRange[] converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - List> converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - List> converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs deleted file mode 100644 index 6d5eb29f10..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System.Net; -using Npgsql.BackendMessages; -using 
Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -#pragma warning disable 618 - -namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; - -/// -/// A type handler for the PostgreSQL cidr data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-net-types.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class CidrHandler : NpgsqlSimpleTypeHandler<(IPAddress Address, int Subnet)>, INpgsqlSimpleTypeHandler -{ - public CidrHandler(PostgresType pgType) : base(pgType) {} - - /// - public override (IPAddress Address, int Subnet) Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => InetHandler.DoRead(buf, len, fieldDescription, true); - - NpgsqlInet INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var (address, subnet) = Read(buf, len, fieldDescription); - return new NpgsqlInet(address, subnet); - } - - /// - public override int ValidateAndGetLength((IPAddress Address, int Subnet) value, NpgsqlParameter? parameter) - => InetHandler.GetLength(value.Address); - - /// - public int ValidateAndGetLength(NpgsqlInet value, NpgsqlParameter? parameter) - => InetHandler.GetLength(value.Address); - - /// - public override void Write((IPAddress Address, int Subnet) value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => InetHandler.DoWrite(value.Address, value.Subnet, buf, true); - - /// - public void Write(NpgsqlInet value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => InetHandler.DoWrite(value.Address, value.Netmask, buf, true); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs deleted file mode 100644 index 276ca158f7..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs +++ /dev/null @@ -1,131 +0,0 @@ -using System; -using System.Diagnostics; -using System.Net; -using System.Net.Sockets; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -#pragma warning disable 618 - -namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; - -/// -/// A type handler for the PostgreSQL cidr data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-net-types.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class InetHandler : NpgsqlSimpleTypeHandlerWithPsv, - INpgsqlSimpleTypeHandler -{ - // ReSharper disable InconsistentNaming - const byte IPv4 = 2; - const byte IPv6 = 3; - // ReSharper restore InconsistentNaming - - public InetHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override IPAddress Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => DoRead(buf, len, fieldDescription, false).Address; - -#pragma warning disable CA1801 // Review unused parameters - internal static (IPAddress Address, int Subnet) DoRead( - NpgsqlReadBuffer buf, - int len, - FieldDescription? 
fieldDescription, - bool isCidrHandler) - { - buf.ReadByte(); // addressFamily - var mask = buf.ReadByte(); - var isCidr = buf.ReadByte() == 1; - Debug.Assert(isCidrHandler == isCidr); - var numBytes = buf.ReadByte(); - var bytes = new byte[numBytes]; - for (var i = 0; i < bytes.Length; i++) - bytes[i] = buf.ReadByte(); - - return (new IPAddress(bytes), mask); - } -#pragma warning restore CA1801 // Review unused parameters - - /// - protected override (IPAddress Address, int Subnet) ReadPsv(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => DoRead(buf, len, fieldDescription, false); - - NpgsqlInet INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var (address, subnet) = DoRead(buf, len, fieldDescription, false); - return new NpgsqlInet(address, subnet); - } - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(IPAddress value, NpgsqlParameter? parameter) - => GetLength(value); - - /// - public override int ValidateAndGetLength((IPAddress Address, int Subnet) value, NpgsqlParameter? parameter) - => GetLength(value.Address); - - /// - public int ValidateAndGetLength(NpgsqlInet value, NpgsqlParameter? parameter) - => GetLength(value.Address); - - /// - public override void Write(IPAddress value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => DoWrite(value, -1, buf, false); - - /// - public override void Write((IPAddress Address, int Subnet) value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => DoWrite(value.Address, value.Subnet, buf, false); - - /// - public void Write(NpgsqlInet value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => DoWrite(value.Address, value.Netmask, buf, false); - - internal static void DoWrite(IPAddress ip, int mask, NpgsqlWriteBuffer buf, bool isCidrHandler) - { - switch (ip.AddressFamily) { - case AddressFamily.InterNetwork: - buf.WriteByte(IPv4); - if (mask == -1) - mask = 32; - break; - case AddressFamily.InterNetworkV6: - buf.WriteByte(IPv6); - if (mask == -1) - mask = 128; - break; - default: - throw new InvalidCastException($"Can't handle IPAddress with AddressFamily {ip.AddressFamily}, only InterNetwork or InterNetworkV6!"); - } - - buf.WriteByte((byte)mask); - buf.WriteByte((byte)(isCidrHandler ? 1 : 0)); // Ignored on server side - var bytes = ip.GetAddressBytes(); - buf.WriteByte((byte)bytes.Length); - buf.WriteBytes(bytes, 0, bytes.Length); - } - - internal static int GetLength(IPAddress value) - => value.AddressFamily switch - { - AddressFamily.InterNetwork => 8, - AddressFamily.InterNetworkV6 => 20, - _ => throw new InvalidCastException($"Can't handle IPAddress with AddressFamily {value.AddressFamily}, only InterNetwork or InterNetworkV6!") - }; - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs deleted file mode 100644 index 26ade3e22b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs +++ /dev/null @@ -1,52 +0,0 @@ -using System.Diagnostics; -using System.Net.NetworkInformation; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; - -/// -/// A type handler for the PostgreSQL macaddr and macaddr8 data types. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-net-types.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. 
However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class MacaddrHandler : NpgsqlSimpleTypeHandler -{ - public MacaddrHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override PhysicalAddress Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(len == 6 || len == 8); - - var bytes = new byte[len]; - - buf.ReadBytes(bytes, 0, len); - return new PhysicalAddress(bytes); - } - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(PhysicalAddress value, NpgsqlParameter? parameter) - => value.GetAddressBytes().Length; - - /// - public override void Write(PhysicalAddress value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var bytes = value.GetAddressBytes(); - buf.WriteBytes(bytes, 0, bytes.Length); - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs deleted file mode 100644 index 0115728f33..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs +++ /dev/null @@ -1,150 +0,0 @@ -using System; -using System.Runtime.InteropServices; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -[StructLayout(LayoutKind.Explicit)] -struct DecimalRaw -{ - const int SignMask = unchecked((int)0x80000000); - const int ScaleMask = 0x00FF0000; - const int ScaleShift = 16; - - // Fast access for 10^n where n is 0-9 - internal static readonly uint[] Powers10 = - { - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000 - }; - - // The maximum power of 10 that a 32 bit unsigned integer can store - internal static readonly int MaxUInt32Scale = Powers10.Length - 1; - - // Do not change the order in which these fields are 
declared. It - // should be same as in the System.Decimal struct. - [FieldOffset(0)] - decimal _value; - [FieldOffset(0)] - int _flags; - [FieldOffset(4)] - uint _high; - [FieldOffset(8)] - uint _low; - [FieldOffset(12)] - uint _mid; - - public bool Negative => (_flags & SignMask) != 0; - - public int Scale - { - get => (_flags & ScaleMask) >> ScaleShift; - set => _flags = (_flags & SignMask) | ((value << ScaleShift) & ScaleMask); - } - - public uint High => _high; - public uint Mid => _mid; - public uint Low => _low; - public decimal Value => _value; - - public DecimalRaw(decimal value) : this() => _value = value; - - public DecimalRaw(long value) : this() - { - if (value >= 0) - _flags = 0; - else - { - _flags = SignMask; - value = -value; - } - - _low = (uint)value; - _mid = (uint)(value >> 32); - _high = 0; - } - - public static void Negate(ref DecimalRaw value) - => value._flags ^= SignMask; - - public static void Add(ref DecimalRaw value, uint addend) - { - uint integer; - uint sum; - - integer = value._low; - value._low = sum = integer + addend; - - if (sum >= integer && sum >= addend) - return; - - integer = value._mid; - value._mid = sum = integer + 1; - - if (sum >= integer && sum >= 1) - return; - - integer = value._high; - value._high = sum = integer + 1; - - if (sum < integer || sum < 1) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - } - - public static void Multiply(ref DecimalRaw value, uint multiplier) - { - ulong integer; - uint remainder; - - integer = (ulong)value._low * multiplier; - value._low = (uint)integer; - remainder = (uint)(integer >> 32); - - integer = (ulong)value._mid * multiplier + remainder; - value._mid = (uint)integer; - remainder = (uint)(integer >> 32); - - integer = (ulong)value._high * multiplier + remainder; - value._high = (uint)integer; - remainder = (uint)(integer >> 32); - - if (remainder != 0) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - } - - 
public static uint Divide(ref DecimalRaw value, uint divisor) - { - ulong integer; - uint remainder = 0; - - if (value._high != 0) - { - integer = value._high; - value._high = (uint)(integer / divisor); - remainder = (uint)(integer % divisor); - } - - if (value._mid != 0 || remainder != 0) - { - integer = ((ulong)remainder << 32) | value._mid; - value._mid = (uint)(integer / divisor); - remainder = (uint)(integer % divisor); - } - - if (value._low != 0 || remainder != 0) - { - integer = ((ulong)remainder << 32) | value._low; - value._low = (uint)(integer / divisor); - remainder = (uint)(integer % divisor); - } - - return remainder; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs deleted file mode 100644 index 33b1bae14c..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs +++ /dev/null @@ -1,32 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL double precision data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class DoubleHandler : NpgsqlSimpleTypeHandler -{ - public DoubleHandler(PostgresType pgType) : base(pgType) {} - - /// - public override double Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadDouble(); - - /// - public override int ValidateAndGetLength(double value, NpgsqlParameter? 
parameter) - => 8; - - /// - public override void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteDouble(value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs deleted file mode 100644 index 30c704e574..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs +++ /dev/null @@ -1,109 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL smallint data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class Int16Handler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public Int16Handler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override short Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadInt16(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((byte)Read(buf, len, fieldDescription)); - - sbyte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((sbyte)Read(buf, len, fieldDescription)); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - float INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - decimal INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(short value, NpgsqlParameter? parameter) => 2; - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 2; - /// - public int ValidateAndGetLength(sbyte value, NpgsqlParameter? parameter) => 2; - /// - public int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) => 2; - - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public int ValidateAndGetLength(float value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public override void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16(value); - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteInt16((short)value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16(value); - /// - public void Write(sbyte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16(value); - /// - public void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs deleted file mode 100644 index 3b778d9a70..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs +++ /dev/null @@ -1,96 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL integer data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class Int32Handler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public Int32Handler(PostgresType pgType) : base(pgType) {} - - #region Read - - public override int Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - => buf.ReadInt32(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((byte)Read(buf, len, fieldDescription)); - - short INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((short)Read(buf, len, fieldDescription)); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - float INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - decimal INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => 4; - /// - public int ValidateAndGetLength(short value, NpgsqlParameter? parameter) => 4; - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 4; - /// - public int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) => 4; - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - { - _ = checked((int)value); - return 4; - } - - /// - public int ValidateAndGetLength(float value, NpgsqlParameter? parameter) - { - _ = checked((int)value); - return 4; - } - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - { - _ = checked((int)value); - return 4; - } - - /// - public override void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32(value); - /// - public void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteInt32(value); - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32(value); - /// - public void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - /// - public void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs deleted file mode 100644 index 7a39de1856..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs +++ /dev/null @@ -1,92 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL bigint data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class Int64Handler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public Int64Handler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override long Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - => buf.ReadInt64(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((byte)Read(buf, len, fieldDescription)); - - short INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((short)Read(buf, len, fieldDescription)); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((int)Read(buf, len, fieldDescription)); - - float INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - decimal INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(long value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(short value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) => 8; - - /// - public int ValidateAndGetLength(float value, NpgsqlParameter? parameter) - { - _ = checked((long)value); - return 8; - } - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - { - _ = checked((long)value); - return 8; - } - - /// - public override void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64(value); - /// - public void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteInt64(value); - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64(value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64(value); - /// - public void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64((long)value); - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64((long)value); - /// - public void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64((long)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs deleted file mode 100644 index ebab3d3fb9..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs +++ /dev/null @@ -1,52 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL money data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-money.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class MoneyHandler : NpgsqlSimpleTypeHandler -{ - public MoneyHandler(PostgresType pgType) : base(pgType) {} - - const int MoneyScale = 2; - - /// - public override decimal Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new DecimalRaw(buf.ReadInt64()) { Scale = MoneyScale }.Value; - - /// - public override int ValidateAndGetLength(decimal value, NpgsqlParameter? 
parameter) - => value < -92233720368547758.08M || value > 92233720368547758.07M - ? throw new OverflowException($"The supplied value ({value}) is outside the range for a PostgreSQL money value.") - : 8; - - /// - public override void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var raw = new DecimalRaw(value); - - var scaleDifference = MoneyScale - raw.Scale; - if (scaleDifference > 0) - DecimalRaw.Multiply(ref raw, DecimalRaw.Powers10[scaleDifference]); - else - { - value = Math.Round(value, MoneyScale, MidpointRounding.AwayFromZero); - raw = new DecimalRaw(value); - } - - var result = (long)raw.Mid << 32 | raw.Low; - if (raw.Negative) result = -result; - buf.WriteInt64(result); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs deleted file mode 100644 index 1e624f86f7..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs +++ /dev/null @@ -1,434 +0,0 @@ -using System; -using System.Globalization; -using System.Numerics; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL numeric data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class NumericHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler -{ - public NumericHandler(PostgresType pgType) : base(pgType) {} - - const int MaxDecimalScale = 28; - - const int SignPositive = 0x0000; - const int SignNegative = 0x4000; - const int SignNan = 0xC000; - const int SignPinf = 0xD000; - const int SignNinf = 0xF000; - const int SignSpecialMask = 0xC000; - - const int MaxGroupCount = 8; - const int MaxGroupScale = 4; - - static readonly uint MaxGroupSize = DecimalRaw.Powers10[MaxGroupScale]; - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4 * sizeof(short), async); - var result = new DecimalRaw(); - var groups = buf.ReadInt16(); - var weight = buf.ReadInt16() - groups + 1; - var sign = buf.ReadUInt16(); - - if ((sign & SignSpecialMask) == SignSpecialMask) - { - throw sign switch - { - SignNan => new InvalidCastException("Numeric NaN not supported by System.Decimal"), - SignPinf => new InvalidCastException("Numeric Infinity not supported by System.Decimal"), - SignNinf => new InvalidCastException("Numeric -Infinity not supported by System.Decimal"), - _ => new InvalidCastException($"Numeric special value {sign} not supported by System.Decimal") - }; - } - - if (sign == SignNegative) - DecimalRaw.Negate(ref result); - - var scale = buf.ReadInt16(); - if (scale < 0 is var exponential && exponential) - scale = (short)(-scale); - else - result.Scale = scale; - - if (scale > MaxDecimalScale) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - - var scaleDifference = exponential - ? 
weight * MaxGroupScale - : weight * MaxGroupScale + scale; - - if (groups > MaxGroupCount) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - - await buf.Ensure(groups * sizeof(ushort), async); - - if (groups == MaxGroupCount) - { - while (groups-- > 1) - { - DecimalRaw.Multiply(ref result, MaxGroupSize); - DecimalRaw.Add(ref result, buf.ReadUInt16()); - } - - var group = buf.ReadUInt16(); - var groupSize = DecimalRaw.Powers10[-scaleDifference]; - if (group % groupSize != 0) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - - DecimalRaw.Multiply(ref result, MaxGroupSize / groupSize); - DecimalRaw.Add(ref result, group / groupSize); - } - else - { - while (groups-- > 0) - { - DecimalRaw.Multiply(ref result, MaxGroupSize); - DecimalRaw.Add(ref result, buf.ReadUInt16()); - } - - if (scaleDifference < 0) - DecimalRaw.Divide(ref result, DecimalRaw.Powers10[-scaleDifference]); - else - while (scaleDifference > 0) - { - var scaleChunk = Math.Min(DecimalRaw.MaxUInt32Scale, scaleDifference); - DecimalRaw.Multiply(ref result, DecimalRaw.Powers10[scaleChunk]); - scaleDifference -= scaleChunk; - } - } - - return result.Value; - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (byte)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (short)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (int)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => (long)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (float)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (double)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4 * sizeof(short), async); - - var groups = (int)buf.ReadUInt16(); - var weightLeft = (int)buf.ReadInt16(); - var weightRight = weightLeft - groups + 1; - var sign = buf.ReadUInt16(); - buf.ReadInt16(); // dscale - - if (groups == 0) - { - return sign switch - { - SignPositive or SignNegative => BigInteger.Zero, - SignNan => throw new InvalidCastException("Numeric NaN not supported by BigInteger"), - SignPinf => throw new InvalidCastException("Numeric Infinity not supported by BigInteger"), - SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by BigInteger"), - _ => throw new InvalidCastException($"Numeric special value {sign} not supported") - }; - } - - if (weightRight < 0) - { - await buf.Skip(groups * sizeof(ushort), async); - throw new InvalidCastException("Numeric value with non-zero fractional digits not supported by BigInteger"); - } - - var digits = new ushort[groups]; - - for (var i = 0; i < groups; i++) - { - await buf.Ensure(sizeof(ushort), async); - digits[i] = buf.ReadUInt16(); - } - - // Calculate powers 10^8, 10^16, 10^32, ... - // We should have the last calculated power to be less than the input - var lenPow = 2; // 2 ushorts fit in one uint, represents 10^8 - var numPowers = 0; - while (lenPow < weightLeft + 1) - { - lenPow <<= 1; - ++numPowers; - } - var factors = numPowers > 0 ? 
new BigInteger[numPowers] : null; - if (numPowers > 0) - { - factors![0] = new BigInteger(100000000U); - for (var i = 1; i < numPowers; i++) - factors[i] = factors[i - 1] * factors[i - 1]; - } - - var result = ToBigIntegerInner(0, weightLeft + 1, digits, factors); - return sign == SignPositive ? result : -result; - - static BigInteger ToBigIntegerInner(int offset, int length, ushort[] digits, BigInteger[]? factors) - { - if (length <= 2) - { - var r = 0U; - for (var i = offset; i < offset + length; i++) - { - r *= 10000U; - r += i < digits.Length ? digits[i] : 0U; - } - return r; - } - else - { - // Split the input into two halves, the lower one should be a power of two in digit length, - // then multiply the higher part with a precomputed power of 10^8 and add the results. - var lenFirstHalf = 2 << 1; // 2 ushorts fit in one uint, skip 1 since we've already covered the base case. - var pos = 0; - while (lenFirstHalf < length) - { - lenFirstHalf <<= 1; - ++pos; - } - var factor = factors![pos]; - lenFirstHalf >>= 1; - var lo = ToBigIntegerInner(offset + length - lenFirstHalf, lenFirstHalf, digits, factors); - var hi = ToBigIntegerInner(offset, length - lenFirstHalf, digits, factors); - return hi * factor + lo; // .NET uses Karatsuba multiplication, so this will be fast. - } - } - } - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(decimal value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var groupCount = 0; - var raw = new DecimalRaw(value); - if (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - { - uint remainder = default; - var scaleChunk = raw.Scale % MaxGroupScale; - if (scaleChunk > 0) - { - var divisor = DecimalRaw.Powers10[scaleChunk]; - var multiplier = DecimalRaw.Powers10[MaxGroupScale - scaleChunk]; - remainder = DecimalRaw.Divide(ref raw, divisor) * multiplier; - } - - while (remainder == 0) - remainder = DecimalRaw.Divide(ref raw, MaxGroupSize); - - groupCount++; - - while (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - { - DecimalRaw.Divide(ref raw, MaxGroupSize); - groupCount++; - } - } - - return lengthCache.Set((4 + groupCount) * sizeof(short)); - } - - /// - public int ValidateAndGetLength(short value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(int value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(long value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(float value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(double value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(byte value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - - public override async Task Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < (4 + MaxGroupCount) * sizeof(short)) - await buf.Flush(async, cancellationToken); - - WriteInner(new DecimalRaw(value), buf); - - static void WriteInner(DecimalRaw raw, NpgsqlWriteBuffer buf) - { - var weight = 0; - var groupCount = 0; - Span groups = stackalloc short[MaxGroupCount]; - groups.Fill(0); // SkipLocalsInit - - if (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - { - var scale = raw.Scale; - weight = -scale / MaxGroupScale - 1; - - uint remainder; - var scaleChunk = scale % MaxGroupScale; - if (scaleChunk > 0) - { - var divisor = DecimalRaw.Powers10[scaleChunk]; - var multiplier = DecimalRaw.Powers10[MaxGroupScale - scaleChunk]; - remainder = DecimalRaw.Divide(ref raw, divisor) * multiplier; - - if (remainder != 0) - { - weight--; - goto WriteGroups; - } - } - - while ((remainder = DecimalRaw.Divide(ref raw, MaxGroupSize)) == 0) - weight++; - - WriteGroups: - groups[groupCount++] = (short)remainder; - - while (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - groups[groupCount++] = (short)DecimalRaw.Divide(ref raw, MaxGroupSize); - } - - buf.WriteInt16(groupCount); - buf.WriteInt16(groupCount + weight); - buf.WriteInt16(raw.Negative ? SignNegative : SignPositive); - buf.WriteInt16(raw.Scale); - - while (groupCount > 0) - buf.WriteInt16(groups[--groupCount]); - } - } - - /// - public Task Write(short value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(int value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(long value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(byte value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(float value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(double value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - - static ushort[] FromBigInteger(BigInteger value) - { - var str = value.ToString(CultureInfo.InvariantCulture); - if (str == "0") - return new ushort[4]; - - var negative = str[0] == '-'; - var strLen = str.Length; - var numGroups = (strLen - (negative ? 
1 : 0) + 3) / 4; - - if (numGroups > 131072 / 4) - throw new InvalidCastException("Cannot write a BigInteger with more than 131072 digits"); - - var result = new ushort[4 + numGroups]; - - var strPos = strLen - numGroups * 4; - - var firstDigit = 0; - for (var i = 0; i < 4; i++) - { - if (strPos >= 0 && str[strPos] != '-') - firstDigit = firstDigit * 10 + (str[strPos] - '0'); - strPos++; - } - - result[4] = (ushort)firstDigit; - - for (var i = 1; i < numGroups; i++) - { - result[4 + i] = (ushort)((((str[strPos++] - '0') * 10 + (str[strPos++] - '0')) * 10 + (str[strPos++] - '0')) * 10 + - (str[strPos++] - '0')); - - } - - var lastNonZeroDigitPos = numGroups - 1; - while (result[4 + lastNonZeroDigitPos] == 0) - lastNonZeroDigitPos--; - - result[0] = (ushort)(lastNonZeroDigitPos + 1); // number of items in array - result[1] = (ushort)(numGroups - 1); // weight - result[2] = (ushort)(negative ? SignNegative : SignPositive); - result[3] = 0; // dscale - - return result; - } - - public int ValidateAndGetLength(BigInteger value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var result = FromBigInteger(value); - if (parameter != null) - parameter.ConvertedValue = result; - - return lengthCache.Set((4 + result[0]) * sizeof(ushort)); - } - - public async Task Write(BigInteger value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - { - var result = (ushort[])(parameter?.ConvertedValue ?? 
FromBigInteger(value))!; - var len = 4 + result[0]; - var pos = 0; - while (len-- > 0) - { - if (buf.WriteSpaceLeft < sizeof(ushort)) - await buf.Flush(async, cancellationToken); - buf.WriteUInt16(result[pos++]); - } - } - - #endregion -} diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs deleted file mode 100644 index 09554db1e9..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs +++ /dev/null @@ -1,45 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL real data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class SingleHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public SingleHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override float Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadSingle(); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) => 4; - /// - public override int ValidateAndGetLength(float value, NpgsqlParameter? parameter) => 4; - - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteSingle((float)value); - /// - public override void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteSingle(value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs deleted file mode 100644 index 1ea4633289..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs +++ /dev/null @@ -1,31 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for PostgreSQL unsigned 32-bit data types. This is only used for internal types. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-oid.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class UInt32Handler : NpgsqlSimpleTypeHandler -{ - public UInt32Handler(PostgresType pgType) : base(pgType) {} - - /// - public override uint Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadUInt32(); - - /// - public override int ValidateAndGetLength(uint value, NpgsqlParameter? parameter) => 4; - - /// - public override void Write(uint value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => buf.WriteUInt32(value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs deleted file mode 100644 index db6d00d1db..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs +++ /dev/null @@ -1,29 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for PostgreSQL unsigned 64-bit data types. This is only used for internal types. -/// -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class UInt64Handler : NpgsqlSimpleTypeHandler -{ - public UInt64Handler(PostgresType pgType) : base(pgType) {} - - /// - public override ulong Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadUInt64(); - - /// - public override int ValidateAndGetLength(ulong value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(ulong value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => buf.WriteUInt64(value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs deleted file mode 100644 index 8c9792aabb..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs +++ /dev/null @@ -1,188 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for PostgreSQL range types. -/// -/// -/// See https://www.postgresql.org/docs/current/static/rangetypes.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -/// The range subtype. -// NOTE: This cannot inherit from NpgsqlTypeHandler>, since that triggers infinite generic recursion in Native AOT -public partial class RangeHandler : NpgsqlTypeHandler, INpgsqlTypeHandler> -{ - /// - /// The type handler for the subtype that this range type holds - /// - protected NpgsqlTypeHandler SubtypeHandler { get; } - - /// - public RangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType) - => SubtypeHandler = subtypeHandler; - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange); - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) => typeof(NpgsqlRange); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler>(pgArrayType, this, arrayNullabilityMode); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); - - #region Read - - /// - public ValueTask> Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadRange(buf, len, async, fieldDescription); - - protected internal async ValueTask> ReadRange(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(1, async); - - var flags = (RangeFlags)buf.ReadByte(); - if ((flags & RangeFlags.Empty) != 0) - return NpgsqlRange.Empty; - - var lowerBound = default(TAnySubtype); - var upperBound = default(TAnySubtype); - - if ((flags & RangeFlags.LowerBoundInfinite) == 0) - lowerBound = await SubtypeHandler.ReadWithLength(buf, async); - - if ((flags & RangeFlags.UpperBoundInfinite) == 0) - upperBound = await SubtypeHandler.ReadWithLength(buf, async); - - return new NpgsqlRange(lowerBound, upperBound, flags); - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await Read(buf, len, async, fieldDescription); - - #endregion - - #region Write - - /// - public int ValidateAndGetLength(NpgsqlRange value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - protected internal int ValidateAndGetLengthRange(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - var totalLen = 1; - var lengthCachePos = lengthCache?.Position ?? 0; - if (!value.IsEmpty) - { - if (!value.LowerBoundInfinite) - { - totalLen += 4; - if (value.LowerBound is not null) - totalLen += SubtypeHandler.ValidateAndGetLength(value.LowerBound, ref lengthCache, null); - } - - if (!value.UpperBoundInfinite) - { - totalLen += 4; - if (value.UpperBound is not null) - totalLen += SubtypeHandler.ValidateAndGetLength(value.UpperBound, ref lengthCache, null); - } - } - - // If we're traversing an already-populated length cache, rewind to first element slot so that - // the elements' handlers can access their length cache values - if (lengthCache != null && lengthCache.IsPopulated) - lengthCache.Position = lengthCachePos; - - return totalLen; - } - - /// - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - protected internal async Task WriteRange(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte((byte)value.Flags); - - if (value.IsEmpty) - return; - - if (!value.LowerBoundInfinite) - await SubtypeHandler.WriteWithLength(value.LowerBound, buf, lengthCache, null, async, cancellationToken); - - if (!value.UpperBoundInfinite) - await SubtypeHandler.WriteWithLength(value.UpperBound, buf, lengthCache, null, async, cancellationToken); - } - - #endregion -} - -/// -/// Type handler for PostgreSQL range types. -/// -/// -/// Introduced in PostgreSQL 9.2. -/// https://www.postgresql.org/docs/current/static/rangetypes.html -/// -/// The main range subtype. -/// An alternative range subtype. 
-public class RangeHandler : RangeHandler, INpgsqlTypeHandler> -{ - /// - public RangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType, subtypeHandler) {} - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - /// - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - /// - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - NpgsqlRange converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - NpgsqlRange converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - NpgsqlRange converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - NpgsqlRange converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs b/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs deleted file mode 100644 index 04b16bca62..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Linq; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Type handler for PostgreSQL record types. Defaults to returning object[], but can also return or . 
-/// -/// -/// https://www.postgresql.org/docs/current/static/datatype-pseudo.html -/// -/// Encoding (identical to composite): -/// A 32-bit integer with the number of columns, then for each column: -/// * An OID indicating the type of the column -/// * The length of the column(32-bit integer), or -1 if null -/// * The column data encoded as binary -/// -sealed partial class RecordHandler : NpgsqlTypeHandler -{ - readonly TypeMapper _typeMapper; - - public RecordHandler(PostgresType postgresType, TypeMapper typeMapper) - : base(postgresType) - => _typeMapper = typeMapper; - - #region Read - - protected internal override async ValueTask ReadCustom( - NpgsqlReadBuffer buf, - int len, - bool async, - FieldDescription? fieldDescription) - { - if (typeof(T) == typeof(object[])) - return (T)(object)await Read(buf, len, async, fieldDescription); - - if (typeof(T).FullName?.StartsWith("System.ValueTuple`", StringComparison.Ordinal) == true || - typeof(T).FullName?.StartsWith("System.Tuple`", StringComparison.Ordinal) == true) - { - var asArray = await Read(buf, len, async, fieldDescription); - if (typeof(T).GenericTypeArguments.Length != asArray.Length) - throw new InvalidCastException($"Cannot read record type with {asArray.Length} fields as {typeof(T)}"); - - var constructor = typeof(T).GetConstructors().Single(c => c.GetParameters().Length == asArray.Length); - return (T)constructor.Invoke(asArray); - } - - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await Read(buf, len, async, fieldDescription); - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(4, async); - var fieldCount = buf.ReadInt32(); - var result = new object[fieldCount]; - - for (var i = 0; i < fieldCount; i++) - { - await buf.Ensure(8, async); - var typeOID = buf.ReadUInt32(); - var fieldLen = buf.ReadInt32(); - if (fieldLen == -1) // Null field, simply skip it and leave at default - continue; - result[i] = await _typeMapper.ResolveByOID(typeOID).ReadAsObject(buf, fieldLen, async); - } - - return result; - } - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); - - #endregion - - #region Write (unsupported) - - public override int ValidateAndGetLength(object[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException("Can't write record types"); - - public override Task Write( - object[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => throw new NotSupportedException("Can't write record types"); - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs deleted file mode 100644 index e3c5f957d4..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ /dev/null @@ -1,307 +0,0 @@ -using System; -using System.Data; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for PostgreSQL character data types (text, char, varchar, xml...). 
-/// -/// -/// See https://www.postgresql.org/docs/current/datatype-character.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TextHandler : NpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler>, - INpgsqlTypeHandler, INpgsqlTypeHandler, ITextReaderHandler -{ - // Text types are handled a bit more efficiently when sent as text than as binary - // see https://github.com/npgsql/npgsql/issues/1210#issuecomment-235641670 - internal override bool PreferTextWrite => true; - - readonly Encoding _encoding; - - /// - protected internal TextHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType) - => _encoding = encoding; - - #region Read - - /// - public override ValueTask Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription = null) - { - return buf.ReadBytesLeft >= byteLen - ? new ValueTask(buf.ReadString(byteLen)) - : ReadLong(buf, byteLen, async); - - static async ValueTask ReadLong(NpgsqlReadBuffer buf, int byteLen, bool async) - { - if (byteLen <= buf.Size) - { - // The string's byte representation can fit in our read buffer, read it. - while (buf.ReadBytesLeft < byteLen) - await buf.ReadMore(async); - return buf.ReadString(byteLen); - } - - // Bad case: the string's byte representation doesn't fit in our buffer. - // This is rare - will only happen in CommandBehavior.Sequential mode (otherwise the - // entire row is in memory). Tweaking the buffer length via the connection string can - // help avoid this. - - // Allocate a temporary byte buffer to hold the entire string and read it in chunks. 
- var tempBuf = new byte[byteLen]; - var pos = 0; - while (true) - { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(tempBuf, pos, len); - pos += len; - if (pos < byteLen) - { - await buf.ReadMore(async); - continue; - } - break; - } - return buf.TextEncoding.GetString(tempBuf); - } - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription) - { - if (byteLen <= buf.Size) - { - // The string's byte representation can fit in our read buffer, read it. - while (buf.ReadBytesLeft < byteLen) - await buf.ReadMore(async); - return buf.ReadChars(byteLen); - } - - // TODO: The following can be optimized with Decoder - no need to allocate a byte[] - var tempBuf = new byte[byteLen]; - var pos = 0; - while (true) - { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(tempBuf, pos, len); - pos += len; - if (pos < byteLen) - { - await buf.ReadMore(async); - continue; - } - break; - } - return buf.TextEncoding.GetChars(tempBuf); - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - // Make sure we have enough bytes in the buffer for a single character - var maxBytes = Math.Min(buf.TextEncoding.GetMaxByteCount(1), len); - while (buf.ReadBytesLeft < maxBytes) - await buf.ReadMore(async); - - return ReadCharCore(); - - unsafe char ReadCharCore() - { - var decoder = buf.TextEncoding.GetDecoder(); - -#if NETSTANDARD2_0 - var singleCharArray = new char[1]; - decoder.Convert(buf.Buffer, buf.ReadPosition, maxBytes, singleCharArray, 0, 1, true, out var bytesUsed, out var charsUsed, out _); -#else - Span singleCharArray = stackalloc char[1]; - decoder.Convert(buf.Buffer.AsSpan(buf.ReadPosition, maxBytes), singleCharArray, true, out var bytesUsed, out var charsUsed, out _); -#endif - - buf.Skip(len - bytesUsed); - - if (charsUsed < 1) - throw new NpgsqlException("Could not read char - string was empty"); - - return singleCharArray[0]; - } - } - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing ArraySegment to PostgreSQL text is supported, no reading."); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription) - { - var bytes = new byte[byteLen]; - if (buf.ReadBytesLeft >= byteLen) - { - buf.ReadBytes(bytes, 0, byteLen); - return new ValueTask(bytes); - } - return ReadLong(buf, bytes, byteLen, async); - - static async ValueTask ReadLong(NpgsqlReadBuffer buf, byte[] bytes, int byteLen, bool async) - { - if (byteLen <= buf.Size) - { - // The bytes can fit in our read buffer, read it. - while (buf.ReadBytesLeft < byteLen) - await buf.ReadMore(async); - buf.ReadBytes(bytes, 0, byteLen); - return bytes; - } - - // Bad case: the bytes don't fit in our buffer. - // This is rare - will only happen in CommandBehavior.Sequential mode (otherwise the - // entire row is in memory). Tweaking the buffer length via the connection string can - // help avoid this. 
- - var pos = 0; - while (true) - { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(bytes, pos, len); - pos += len; - if (pos < byteLen) - { - await buf.ReadMore(async); - continue; - } - break; - } - return bytes; - } - } - - #endregion - - #region Write - - /// - public override unsafe int ValidateAndGetLength(string value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - if (parameter == null || parameter.Size <= 0 || parameter.Size >= value.Length) - return lengthCache.Set(_encoding.GetByteCount(value)); - fixed (char* p = value) - return lengthCache.Set(_encoding.GetByteCount(p, parameter.Size)); - } - - /// - public virtual int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - return lengthCache.Set( - parameter == null || parameter.Size <= 0 || parameter.Size >= value.Length - ? _encoding.GetByteCount(value) - : _encoding.GetByteCount(value, 0, parameter.Size) - ); - } - - /// - public virtual int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - if (parameter?.Size > 0) - throw new ArgumentException($"Parameter {parameter.ParameterName} is of type ArraySegment and should not have its Size set", parameter.ParameterName); - - return lengthCache.Set(value.Array is null ? 0 : _encoding.GetByteCount(value.Array, value.Offset, value.Count)); - } - - /// - public int ValidateAndGetLength(char value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { -#if NETSTANDARD2_0 - var singleCharArray = new char[1]; -#else - Span singleCharArray = stackalloc char[1]; -#endif - - singleCharArray[0] = value; - return _encoding.GetByteCount(singleCharArray); - } - - /// - public int ValidateAndGetLength(byte[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Length; - - /// - public override Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteString(value, buf, lengthCache!, parameter, async, cancellationToken); - - /// - public virtual Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var charLen = parameter == null || parameter.Size <= 0 || parameter.Size >= value.Length - ? value.Length - : parameter.Size; - return buf.WriteChars(value, 0, charLen, lengthCache!.GetLast(), async, cancellationToken); - } - - /// - public virtual Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value.Array is null ? Task.CompletedTask : buf.WriteChars(value.Array, value.Offset, value.Count, lengthCache!.GetLast(), async, cancellationToken); - - Task WriteString(string str, NpgsqlWriteBuffer buf, NpgsqlLengthCache lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var charLen = parameter == null || parameter.Size <= 0 || parameter.Size >= str.Length - ? str.Length - : parameter.Size; - return buf.WriteString(str, charLen, lengthCache.GetLast(), async, cancellationToken); - } - - /// - public async Task Write(char value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < _encoding.GetMaxByteCount(1)) - await buf.Flush(async, cancellationToken); - WriteCharCore(value, buf); - - static unsafe void WriteCharCore(char value, NpgsqlWriteBuffer buf) - { -#if NETSTANDARD2_0 - var singleCharArray = new char[1]; - singleCharArray[0] = value; - buf.WriteChars(singleCharArray, 0, 1); -#else - Span singleCharArray = stackalloc char[1]; - singleCharArray[0] = value; - buf.WriteChars(singleCharArray); -#endif - } - } - - /// - public Task Write(byte[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => buf.WriteBytesRaw(value, async, cancellationToken); - - #endregion - - /// - public virtual TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var byteLength = (int)(stream.Length - stream.Position); - return buffer.ReadBytesLeft >= byteLength - ? buffer.GetPreparedTextReader(_encoding.GetString(buffer.Buffer, buffer.ReadPosition, byteLength), stream) - : new StreamReader(stream, _encoding); - } -} diff --git a/src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs b/src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs deleted file mode 100644 index 43d0be10fc..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs +++ /dev/null @@ -1,95 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Handles "conversions" for columns sent by the database with unknown OIDs. -/// This differs from TextHandler in that its a text-only handler (we don't want to receive binary -/// representations of the types registered here). 
-/// Note that this handler is also used in the very initial query that loads the OID mappings -/// (chicken and egg problem). -/// Also used for sending parameters with unknown types (OID=0) -/// -sealed class UnknownTypeHandler : TextHandler -{ - internal UnknownTypeHandler(Encoding encoding) - : base(UnknownBackendType.Instance, encoding) - { - } - - #region Read - - public override ValueTask Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription = null) - { - if (fieldDescription == null) - throw new Exception($"Received an unknown field but {nameof(fieldDescription)} is null (i.e. COPY mode)"); - - if (fieldDescription.IsBinaryFormat) - { - // At least get the name of the PostgreSQL type for the exception - throw new NotSupportedException( - buf.Connector.TypeMapper.DatabaseInfo.ByOID.TryGetValue(fieldDescription.TypeOID, out var pgType) - ? $"The field '{fieldDescription.Name}' has type '{pgType.DisplayName}', which is currently unknown to Npgsql. You can retrieve it as a string by marking it as unknown, please see the FAQ." - : $"The field '{fieldDescription.Name}' has a type currently unknown to Npgsql (OID {fieldDescription.TypeOID}). You can retrieve it as a string by marking it as unknown, please see the FAQ." - ); - } - - return base.Read(buf, byteLen, async, fieldDescription); - } - - #endregion Read - - #region Write - - // Allow writing anything that is a string or can be converted to one via the unknown type handler - - protected internal override int ValidateAndGetLengthCustom( - [DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateObjectAndGetLength(value, ref lengthCache, parameter); - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (value is string asString) - return ValidateAndGetLength(asString, ref lengthCache, parameter); - - if (parameter == null) - throw CreateConversionButNoParamException(value.GetType()); - - var converted = Convert.ToString(value)!; - parameter.ConvertedValue = converted; - - return ValidateAndGetLength(converted, ref lengthCache, parameter); - } - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null or DBNull) - return base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - - var convertedValue = value is string asString - ? asString - : (string)parameter!.ConvertedValue!; - - if (buf.WriteSpaceLeft < 4) - return WriteWithLengthLong(value, convertedValue, buf, lengthCache, parameter, async, cancellationToken); - - buf.WriteInt32(ValidateObjectAndGetLength(value, ref lengthCache, parameter)); - return Write(convertedValue, buf, lengthCache, parameter, async, cancellationToken); - - async Task WriteWithLengthLong(object value, string convertedValue, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken) - { - await buf.Flush(async, cancellationToken); - buf.WriteInt32(ValidateObjectAndGetLength(value!, ref lengthCache, parameter)); - await Write(convertedValue, buf, lengthCache, parameter, async, cancellationToken); - } - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs b/src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs deleted file mode 100644 index d8accea307..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs +++ /dev/null @@ -1,149 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -sealed class UnmappedEnumHandler : TextHandler -{ - readonly INpgsqlNameTranslator _nameTranslator; - - // Note that a separate instance of UnmappedEnumHandler is created for each PG enum type, so concurrency isn't "really" needed. - // However, in theory multiple different CLR enums may be used with the same PG enum type, and even if there's only one, we only know - // about it late (after construction), when the user actually reads/writes with one. So this handler is fully thread-safe. - readonly ConcurrentDictionary _types = new(); - - internal UnmappedEnumHandler(PostgresEnumType pgType, INpgsqlNameTranslator nameTranslator, Encoding encoding) - : base(pgType, encoding) - => _nameTranslator = nameTranslator; - - #region Read - - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - var s = await base.Read(buf, len, async, fieldDescription); - if (typeof(TAny) == typeof(string)) - return (TAny)(object)s; - - var typeRecord = GetTypeRecord(typeof(TAny)); - - if (!typeRecord.LabelToEnum.TryGetValue(s, out var value)) - throw new InvalidCastException($"Received enum value '{s}' from database which wasn't found on enum {typeof(TAny)}"); - - // TODO: Avoid boxing - return (TAny)(object)value; - } - - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => base.Read(buf, len, async, fieldDescription); - - #endregion - - #region Write - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value is null || value is DBNull - ? 0 - : ValidateAndGetLength(value, ref lengthCache, parameter); - - protected internal override int ValidateAndGetLengthCustom(TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value!, ref lengthCache, parameter); - - [UnconditionalSuppressMessage("Unmapped enums currently aren't trimming-safe.", "IL2072")] - int ValidateAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var type = value.GetType(); - if (type == typeof(string)) - return base.ValidateAndGetLength((string)value, ref lengthCache, parameter); - - var typeRecord = GetTypeRecord(type); - - // TODO: Avoid boxing - return typeRecord.EnumToLabel.TryGetValue((Enum)value, out var str) - ? base.ValidateAndGetLength(str, ref lengthCache, parameter) - : throw new InvalidCastException($"Can't write value {value} as enum {type}"); - } - - // TODO: This boxes the enum (again) - protected override Task WriteWithLengthCustom(TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken) - => WriteObjectWithLength(value!, buf, lengthCache, parameter, async, cancellationToken); - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null || value is DBNull) - return WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken); - - if (buf.WriteSpaceLeft < 4) - return WriteWithLengthLong(value, buf, lengthCache, parameter, async, cancellationToken); - - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - return Write(value, buf, lengthCache, parameter, async, cancellationToken); - - async Task WriteWithLengthLong(object value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - { - await buf.Flush(async, cancellationToken); - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - await Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - } - - [UnconditionalSuppressMessage("Unmapped enums currently aren't trimming-safe.", "IL2072")] - internal Task Write(object value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = value.GetType(); - if (type == typeof(string)) - return base.Write((string)value, buf, lengthCache, parameter, async, cancellationToken); - - var typeRecord = GetTypeRecord(type); - - // TODO: Avoid boxing - if (!typeRecord.EnumToLabel.TryGetValue((Enum)value, out var str)) - throw new InvalidCastException($"Can't write value {value} as enum {type}"); - return base.Write(str, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Misc - - TypeRecord GetTypeRecord(Type type) - { -#if NETSTANDARD2_0 - return _types.GetOrAdd(type, t => CreateTypeRecord(t, _nameTranslator)); -#else - return _types.GetOrAdd(type, static (t, translator) => CreateTypeRecord(t, translator), _nameTranslator); -#endif - } - - static TypeRecord CreateTypeRecord(Type type, INpgsqlNameTranslator nameTranslator) - { - var enumToLabel = new Dictionary(); - var labelToEnum = new Dictionary(); - - foreach (var field in type.GetFields(BindingFlags.Static | BindingFlags.Public)) - { - var attribute = (PgNameAttribute?)field.GetCustomAttributes(typeof(PgNameAttribute), false).FirstOrDefault(); - var enumName = attribute?.PgName ?? 
nameTranslator.TranslateMemberName(field.Name); - var enumValue = (Enum)field.GetValue(null)!; - - enumToLabel[enumValue] = enumName; - labelToEnum[enumName] = enumValue; - } - - return new(enumToLabel, labelToEnum); - } - - #endregion - - record struct TypeRecord(Dictionary EnumToLabel, Dictionary LabelToEnum); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/UuidHandler.cs b/src/Npgsql/Internal/TypeHandlers/UuidHandler.cs deleted file mode 100644 index c70da8060d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UuidHandler.cs +++ /dev/null @@ -1,76 +0,0 @@ -using System; -using System.Runtime.InteropServices; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL uuid data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-uuid.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class UuidHandler : NpgsqlSimpleTypeHandler -{ - // The following table shows .NET GUID vs Postgres UUID (RFC 4122) layouts. - // - // Note that the first fields are converted from/to native endianness (handled by the Read* - // and Write* methods), while the last field is always read/written in big-endian format. - // - // We're passing BitConverter.IsLittleEndian to prevent reversing endianness on little-endian systems. 
- // - // | Bits | Bytes | Name | Endianness (GUID) | Endianness (RFC 4122) | - // | ---- | ----- | ----- | ----------------- | --------------------- | - // | 32 | 4 | Data1 | Native | Big | - // | 16 | 2 | Data2 | Native | Big | - // | 16 | 2 | Data3 | Native | Big | - // | 64 | 8 | Data4 | Big | Big | - - public UuidHandler(PostgresType pgType) : base(pgType) {} - - /// - public override Guid Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var raw = new GuidRaw - { - Data1 = buf.ReadInt32(), - Data2 = buf.ReadInt16(), - Data3 = buf.ReadInt16(), - Data4 = buf.ReadInt64(BitConverter.IsLittleEndian) - }; - - return raw.Value; - } - - /// - public override int ValidateAndGetLength(Guid value, NpgsqlParameter? parameter) - => 16; - - /// - public override void Write(Guid value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var raw = new GuidRaw(value); - - buf.WriteInt32(raw.Data1); - buf.WriteInt16(raw.Data2); - buf.WriteInt16(raw.Data3); - buf.WriteInt64(raw.Data4, BitConverter.IsLittleEndian); - } - - [StructLayout(LayoutKind.Explicit)] - struct GuidRaw - { - [FieldOffset(00)] public Guid Value; - [FieldOffset(00)] public int Data1; - [FieldOffset(04)] public short Data2; - [FieldOffset(06)] public short Data3; - [FieldOffset(08)] public long Data4; - public GuidRaw(Guid value) : this() => Value = value; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/VoidHandler.cs b/src/Npgsql/Internal/TypeHandlers/VoidHandler.cs deleted file mode 100644 index da24b58c75..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/VoidHandler.cs +++ /dev/null @@ -1,41 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// https://www.postgresql.org/docs/current/static/datatype-boolean.html -/// -sealed class VoidHandler : 
NpgsqlSimpleTypeHandler -{ - public VoidHandler(PostgresType pgType) : base(pgType) {} - - public override DBNull Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => DBNull.Value; - - public override int ValidateAndGetLength(DBNull value, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - - public override void Write(DBNull value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {nameof(VoidHandler)}") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {nameof(VoidHandler)}") - }; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs deleted file mode 100644 index 9a8ebf8cfa..0000000000 --- a/src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs +++ /dev/null @@ -1,47 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using Npgsql.BackendMessages; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Type handlers that wish to support reading other types in additional to the main one can -/// implement this interface for all those types. 
-/// -public interface INpgsqlSimpleTypeHandler -{ - /// - /// Reads a value of type with the given length from the provided buffer, - /// with the assumption that it is entirely present in the provided memory buffer and no I/O will be - /// required. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - T Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null); - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception should be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - int ValidateAndGetLength([DisallowNull] T value, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, with the assumption that there is enough space in the buffer - /// (no I/O will occur). The Npgsql core will have taken care of that. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - void Write([DisallowNull] T value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs deleted file mode 100644 index 939b7f92a0..0000000000 --- a/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs +++ /dev/null @@ -1,56 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Type handlers that wish to support reading other types in additional to the main one can -/// implement this interface for all those types. -/// -public interface INpgsqlTypeHandler -{ - /// - /// Reads a value of type with the given length from the provided buffer, - /// using either sync or async I/O. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null); - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception should be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// A cache where the length calculated during the validation phase can be stored for use at the writing phase. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. 
- int ValidateAndGetLength([DisallowNull] T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer. - /// - /// The value to write. - /// The buffer to which to write. - /// A cache where the length calculated during the validation phase can be stored for use at the writing phase. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// - /// If I/O will be necessary (i.e. the buffer is full), determines whether it will be done synchronously or asynchronously. - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - Task Write([DisallowNull] T value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs b/src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs deleted file mode 100644 index b55000fadf..0000000000 --- a/src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System.Data.Common; -using System.IO; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Implemented by handlers which support , returns a standard -/// TextReader given a binary Stream. -/// -interface ITextReaderHandler -{ - TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer); -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs deleted file mode 100644 index b36381e9e8..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs +++ /dev/null @@ -1,65 +0,0 @@ -using System.Collections.Generic; -using System.Diagnostics; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// An array of cached lengths for the parameters sending process. 
-/// -/// When sending parameters, lengths need to be calculated more than once (once for Bind, once for -/// an array, once for the string within that array). This cache optimizes that. Lengths are added -/// to the cache, and then retrieved in the same order. -/// -public sealed class NpgsqlLengthCache -{ - public bool IsPopulated; - public int Position; - public List Lengths; - - public NpgsqlLengthCache() => Lengths = new List(); - - public NpgsqlLengthCache(int capacity) => Lengths = new List(capacity); - - /// - /// Stores a length value in the cache, to be fetched later via . - /// Called at the phase. - /// - /// The length parameter. - public int Set(int len) - { - Debug.Assert(!IsPopulated); - Lengths.Add(len); - Position++; - return len; - } - - /// - /// Retrieves a length value previously stored in the cache via . - /// Called at the writing phase, after validation has already occurred and the length cache is populated. - /// - /// - public int Get() - { - Debug.Assert(IsPopulated); - return Lengths[Position++]; - } - - internal int GetLast() - { - Debug.Assert(IsPopulated); - return Lengths[Position-1]; - } - - internal void Rewind() - { - Position = 0; - IsPopulated = true; - } - - internal void Clear() - { - Lengths.Clear(); - Position = 0; - IsPopulated = false; - } -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs deleted file mode 100644 index 84757171ae..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs +++ /dev/null @@ -1,78 +0,0 @@ -using System; -using System.Data.Common; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Base class for all simple type handlers, which read and write short, non-arbitrary lengthed -/// values to PostgreSQL. 
Provides a simpler API to implement when compared to - -/// Npgsql takes care of all I/O before calling into this type, so no I/O needs to be performed by it. -/// -/// -/// The default CLR type that this handler will read and write. For example, calling -/// on a column with this handler will return a value with type . -/// Type handlers can support additional types by implementing . -/// -public abstract class NpgsqlSimpleTypeHandler : NpgsqlTypeHandler, INpgsqlSimpleTypeHandler -{ - protected NpgsqlSimpleTypeHandler(PostgresType postgresType) : base(postgresType) {} - - /// - /// Reads a value of type with the given length from the provided buffer, - /// with the assumption that it is entirely present in the provided memory buffer and no I/O will be - /// required. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - public abstract TDefault Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null); - - public sealed override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => throw new NotSupportedException(); - - #region Write - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception shold be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - public abstract int ValidateAndGetLength(TDefault value, NpgsqlParameter? 
parameter); - - /// - /// Writes a value to the provided buffer, with the assumption that there is enough space in the buffer - /// (no I/O will occur). The Npgsql core will have taken care of that. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - public abstract void Write(TDefault value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter); - - /// - /// Simple type handlers override instead of this. - /// - public sealed override Task Write(TDefault value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => throw new NotSupportedException(); - - /// - /// Simple type handlers override instead of this. - /// - public sealed override int ValidateAndGetLength(TDefault value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs deleted file mode 100644 index f8e02a4a48..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs +++ /dev/null @@ -1,102 +0,0 @@ -using System; -using System.Data.Common; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandlers; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// A simple type handler that supports a provider-specific value in addition to its default value. -/// This is necessary mainly in cases where the CLR type cannot represent the full range of the PostgreSQL type, and a custom CLR type -/// is needed. The provider-specific type will be returned from calls to -/// . 
-/// -/// -/// The default CLR type that this handler will read and write. For example, calling -/// on a column with this handler will return a value with type . -/// Type handlers can support additional types by implementing . -/// -/// The provider-specific CLR type that this handler will read and write. -public abstract class NpgsqlSimpleTypeHandlerWithPsv : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public NpgsqlSimpleTypeHandlerWithPsv(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - /// Reads a value of type with the given length from the provided buffer, - /// with the assumption that it is entirely present in the provided memory buffer and no I/O will be - /// required. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - protected abstract TPsv ReadPsv(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null); - - TPsv INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ReadPsv(buf, len, fieldDescription); - - // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), - // we must use the bang operator here to tell the compiler that a null value will never returned. - - /// - /// Reads a column as the type handler's provider-specific type, assuming that it is already entirely - /// in memory (i.e. no I/O is necessary). Called by in non-sequential mode, which - /// buffers entire rows in memory. - /// - internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => Read(buf, len, fieldDescription)!; - - /// - /// Reads a column as the type handler's provider-specific type. 
If it is not already entirely in - /// memory, sync or async I/O will be performed as specified by . - /// - internal override async ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - #endregion Read - - #region Write - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception shold be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - public abstract int ValidateAndGetLength(TPsv value, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, with the assumption that there is enough space in the buffer - /// (no I/O will occur). The Npgsql core will have taken care of that. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - public abstract void Write(TPsv value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter); - - #endregion Write - - #region Misc - - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) - => typeof(TPsv); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandlerWithPsv(pgArrayType, this, arrayNullabilityMode); - - #endregion Misc -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs deleted file mode 100644 index f799a47351..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs +++ /dev/null @@ -1,289 +0,0 @@ -using System; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandlers; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Base class for all type handlers, which read and write CLR types into their PostgreSQL -/// binary representation. -/// Type handler writers shouldn't inherit from this class, inherit -/// or instead. -/// -public abstract class NpgsqlTypeHandler -{ - protected NpgsqlTypeHandler(PostgresType postgresType) - => PostgresType = postgresType; - - /// - /// The PostgreSQL type handled by this type handler. - /// - public PostgresType PostgresType { get; } - - #region Read - - /// - /// Reads a value of type with the given length from the provided buffer, - /// using either sync or async I/O. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. 
- [MethodImpl(MethodImplOptions.AggressiveInlining)] - protected internal async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - switch (this) - { - case INpgsqlSimpleTypeHandler simpleTypeHandler: - await buf.Ensure(len, async); - return simpleTypeHandler.Read(buf, len, fieldDescription); - case INpgsqlTypeHandler typeHandler: - return await typeHandler.Read(buf, len, async, fieldDescription); - default: - return await ReadCustom(buf, len, async, fieldDescription); - } - } - - /// - /// Version of that's called when we know the entire value - /// is already buffered in memory (i.e. in non-sequential mode). - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public TAny Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(buf.ReadBytesLeft >= len); - - return this switch - { - INpgsqlSimpleTypeHandler simpleTypeHandler => simpleTypeHandler.Read(buf, len, fieldDescription), - INpgsqlTypeHandler typeHandler => typeHandler.Read(buf, len, async: false, fieldDescription).Result, - _ => ReadCustom(buf, len, async: false, fieldDescription).Result - }; - } - - protected internal virtual ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new InvalidCastException(fieldDescription == null - ? $"Can't cast database type to {typeof(TAny).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TAny).Name}"); - - /// - /// Reads a column as the type handler's default read type. If it is not already entirely in - /// memory, sync or async I/O will be performed as specified by . - /// - public abstract ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null); - - /// - /// Version of that's called when we know the entire value - /// is already buffered in memory (i.e. in non-sequential mode). 
- /// - internal object ReadAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(buf.ReadBytesLeft >= len); - - return ReadAsObject(buf, len, async: false, fieldDescription).Result; - } - - /// - /// Reads a column as the type handler's provider-specific type. If it is not already entirely in - /// memory, sync or async I/O will be performed as specified by . - /// - internal virtual ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadAsObject(buf, len, async, fieldDescription); - - /// - /// Version of that's called when we know the entire value - /// is already buffered in memory (i.e. in non-sequential mode). - /// - internal virtual object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(buf.ReadBytesLeft >= len); - - return ReadPsvAsObject(buf, len, async: false, fieldDescription).Result; - } - - /// - /// Reads a value from the buffer, assuming our read position is at the value's preceding length. - /// If the length is -1 (null), this method will return the default value. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal async ValueTask ReadWithLength(NpgsqlReadBuffer buf, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var len = buf.ReadInt32(); - return len == -1 - ? default! - : NullableHandler.Exists - ? await NullableHandler.ReadAsync(this, buf, len, async, fieldDescription) - : await Read(buf, len, async, fieldDescription); - } - - #endregion - - #region Write - - /// - /// Called to validate and get the length of a value of a generic . - /// and must be handled before calling into this. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - protected internal int ValidateAndGetLength( - [DisallowNull] TAny value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - Debug.Assert(value is not DBNull); - - return this switch - { - INpgsqlSimpleTypeHandler simpleTypeHandler => simpleTypeHandler.ValidateAndGetLength(value, parameter), - INpgsqlTypeHandler typeHandler => typeHandler.ValidateAndGetLength(value, ref lengthCache, parameter), - _ => ValidateAndGetLengthCustom(value, ref lengthCache, parameter) - }; - } - - protected internal virtual int ValidateAndGetLengthCustom( - [DisallowNull] TAny value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var parameterName = parameter is null - ? null - : parameter.TrimmedName == string.Empty - ? parameter.Collection is { } paramCollection - ? $"${paramCollection.IndexOf(parameter) + 1}" - : null // in case of COPY operations parameter isn't bound to a collection - : parameter.TrimmedName; - - throw new InvalidCastException(parameterName is null - ? $"Cannot write a value of CLR type '{typeof(TAny)}' as database type '{PgDisplayName}'." - : $"Cannot write a value of CLR type '{typeof(TAny)}' as database type '{PgDisplayName}' for parameter '{parameterName}'."); - } - - /// - /// Called to write the value of a generic . - /// - /// - /// In the vast majority of cases writing a parameter to the buffer won't need to perform I/O. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public async Task WriteWithLength(TAny? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - // TODO: Possibly do a sync path when we don't do I/O (e.g. 
simple type handler, no flush) - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - if (value is null or DBNull) - { - buf.WriteInt32(-1); - return; - } - - switch (this) - { - case INpgsqlSimpleTypeHandler simpleTypeHandler: - var len = simpleTypeHandler.ValidateAndGetLength(value, parameter); - buf.WriteInt32(len); - if (buf.WriteSpaceLeft < len) - await buf.Flush(async, cancellationToken); - simpleTypeHandler.Write(value, buf, parameter); - return; - case INpgsqlTypeHandler typeHandler: - buf.WriteInt32(typeHandler.ValidateAndGetLength(value, ref lengthCache, parameter)); - await typeHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - return; - default: - await WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - return; - } - } - - /// - /// Typically does not need to be overridden by type handlers, but may be needed in some - /// cases (e.g. . - /// Note that this method assumes it can write 4 bytes of length (already verified by - /// ). - /// - protected virtual Task WriteWithLengthCustom( - [DisallowNull] TAny value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken) - => throw new InvalidCastException($"Can't write '{typeof(TAny).Name}' with type handler '{GetType().Name}'"); - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception shold be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// If the byte length calculation is costly (e.g. for UTF-8 strings), its result can be stored in the - /// length cache to be reused in the writing process, preventing recalculation. - /// - /// - /// The instance where this value resides. 
Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - // Source-generated - public abstract int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, using either sync or async I/O. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - // Source-generated - public abstract Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); - - #endregion Write - - #region Misc - - public abstract Type GetFieldType(FieldDescription? fieldDescription = null); - public abstract Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null); - - internal virtual bool PreferTextWrite => false; - - /// - /// Creates a type handler for arrays of this handler's type. - /// - public abstract NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode); - - /// - /// Creates a type handler for ranges of this handler's type. - /// - public abstract NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType); - - /// - /// Creates a type handler for multiranges of this handler's type. 
- /// - public abstract NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType); - - /// - /// Used to create an exception when the provided type can be converted and written, but an - /// instance of is required for caching of the converted value - /// (in . - /// - protected Exception CreateConversionButNoParamException(Type clrType) - => new InvalidCastException($"Can't convert .NET type '{clrType}' to PostgreSQL '{PgDisplayName}' within an array"); - - internal string PgDisplayName => PostgresType.DisplayName; - - #endregion Misc -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs deleted file mode 100644 index aa7f0b6606..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs +++ /dev/null @@ -1,79 +0,0 @@ -using System; -using System.Data.Common; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandlers; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Base class for all type handlers, which read and write CLR types into their PostgreSQL -/// binary representation. Unless your type is arbitrary-length, consider inheriting from -/// instead. -/// -/// -/// The default CLR type that this handler will read and write. For example, calling -/// on a column with this handler will return a value with type . -/// Type handlers can support additional types by implementing . -/// -public abstract class NpgsqlTypeHandler : NpgsqlTypeHandler, INpgsqlTypeHandler -{ - protected NpgsqlTypeHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - /// Reads a value of type with the given length from the provided buffer, - /// using either sync or async I/O. - /// - /// The buffer from which to read. - /// The byte length of the value. 
The buffer might not contain the full length, requiring I/O to be performed. - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - public abstract ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null); - - // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), - // we must use the bang operator here to tell the compiler that a null value will never returned. - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - #endregion Read - - #region Write - - /// - /// Called to validate and get the length of a value of a generic . - /// - public abstract int ValidateAndGetLength(TDefault value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - - /// - /// Called to write the value of a generic . - /// - public abstract Task Write(TDefault value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); - - #endregion Write - - #region Misc - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(TDefault); - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) => typeof(TDefault); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new RangeHandler(pgRangeType, this); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => new MultirangeHandler(pgMultirangeType, (RangeHandler)CreateRangeHandler(pgMultirangeType.Subrange)); - - #endregion Misc -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/NullableHandler.cs b/src/Npgsql/Internal/TypeHandling/NullableHandler.cs deleted file mode 100644 index e3bb0619ce..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NullableHandler.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; - -// ReSharper disable StaticMemberInGenericType -namespace Npgsql.Internal.TypeHandling; - -delegate T ReadDelegate(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null); -delegate ValueTask ReadAsyncDelegate(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLen, bool async, FieldDescription? fieldDescription = null); - -delegate int ValidateAndGetLengthDelegate(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); -delegate Task WriteAsyncDelegate(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); - -static class NullableHandler -{ - public static readonly Type? 
UnderlyingType; - public static readonly ReadDelegate Read = null!; - public static readonly ReadAsyncDelegate ReadAsync = null!; - public static readonly ValidateAndGetLengthDelegate ValidateAndGetLength = null!; - public static readonly WriteAsyncDelegate WriteAsync = null!; - - public static bool Exists => UnderlyingType != null; - - static NullableHandler() - { - UnderlyingType = Nullable.GetUnderlyingType(typeof(T)); - - if (UnderlyingType == null) - return; - - Read = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.ReadMethod); - ReadAsync = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.ReadAsyncMethod); - ValidateAndGetLength = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.ValidateMethod); - WriteAsync = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.WriteAsyncMethod); - } -} - -static class NullableHandler -{ - internal static readonly MethodInfo ReadMethod = new ReadDelegate(Read).Method.GetGenericMethodDefinition(); - internal static readonly MethodInfo ReadAsyncMethod = new ReadAsyncDelegate(ReadAsync).Method.GetGenericMethodDefinition(); - internal static readonly MethodInfo ValidateMethod = new ValidateAndGetLengthDelegate(ValidateAndGetLength).Method.GetGenericMethodDefinition(); - internal static readonly MethodInfo WriteAsyncMethod = new WriteAsyncDelegate(WriteAsync).Method.GetGenericMethodDefinition(); - - static T? Read(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription) - where T : struct - => handler.Read(buffer, columnLength, fieldDescription); - - static async ValueTask ReadAsync(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, bool async, FieldDescription? fieldDescription) - where T : struct - => await handler.Read(buffer, columnLength, async, fieldDescription); - - static int ValidateAndGetLength(NpgsqlTypeHandler handler, T? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - where T : struct - => value.HasValue ? handler.ValidateAndGetLength(value.Value, ref lengthCache, parameter) : 0; - - static Task WriteAsync(NpgsqlTypeHandler handler, T? value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - where T : struct - => value.HasValue - ? handler.WriteWithLength(value.Value, buffer, lengthCache, parameter, async, cancellationToken) - : handler.WriteWithLength(DBNull.Value, buffer, lengthCache, parameter, async, cancellationToken); - - internal static TDelegate CreateDelegate(Type underlyingType, MethodInfo method) - where TDelegate : Delegate - => (TDelegate)method.MakeGenericMethod(underlyingType).CreateDelegate(typeof(TDelegate)); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs deleted file mode 100644 index 4170f75c2b..0000000000 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs +++ /dev/null @@ -1,30 +0,0 @@ -using System; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// An Npgsql resolver for type handlers. Typically used by plugins to alter how Npgsql reads and writes values to PostgreSQL. -/// -public abstract class TypeHandlerResolver -{ - /// - /// Resolves a type handler given a PostgreSQL type name, corresponding to the typname column in the PostgreSQL pg_type catalog table. - /// - /// See . - public abstract NpgsqlTypeHandler? ResolveByDataTypeName(string typeName); - - /// - /// Resolves a type handler given a .NET CLR type. - /// - public abstract NpgsqlTypeHandler? ResolveByClrType(Type type); - - public virtual NpgsqlTypeHandler? ResolveValueDependentValue(object value) => null; - - public virtual NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) => null; - - /// - /// Gets type mapping information for a given PostgreSQL type. 
- /// Invoked in scenarios when mapping information is required, rather than a type handler for reading or writing. - /// - public abstract TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs deleted file mode 100644 index 78d3b5bc47..0000000000 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; - -namespace Npgsql.Internal.TypeHandling; - -public abstract class TypeHandlerResolverFactory -{ - public abstract TypeHandlerResolver Create(NpgsqlConnector connector); - - public abstract string? GetDataTypeNameByClrType(Type clrType); - public virtual string? GetDataTypeNameByValueDependentValue(object value) => null; - public abstract TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName); -} - -static class TypeHandlerResolverFactoryExtensions -{ - internal static TypeMappingInfo? GetMappingByClrType(this TypeHandlerResolverFactory factory, Type clrType) - => factory.GetDataTypeNameByClrType(clrType) is { } dataTypeName ? factory.GetMappingByDataTypeName(dataTypeName) : null; - - internal static TypeMappingInfo? GetMappingByValueDependentValue(this TypeHandlerResolverFactory factory, object value) - => factory.GetDataTypeNameByValueDependentValue(value) is { } dataTypeName ? factory.GetMappingByDataTypeName(dataTypeName) : null; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs b/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs deleted file mode 100644 index 8d61d1e13b..0000000000 --- a/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs +++ /dev/null @@ -1,22 +0,0 @@ -using System; -using System.Data; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandling; - -public class TypeMappingInfo -{ - public TypeMappingInfo(NpgsqlDbType? 
npgsqlDbType, string? dataTypeName, Type clrType) - => (NpgsqlDbType, DataTypeName, ClrTypes) = (npgsqlDbType, dataTypeName, new[] { clrType }); - - public TypeMappingInfo(NpgsqlDbType? npgsqlDbType, string? dataTypeName, params Type[] clrTypes) - => (NpgsqlDbType, DataTypeName, ClrTypes) = (npgsqlDbType, dataTypeName, clrTypes); - - public NpgsqlDbType? NpgsqlDbType { get; } - // Note that we can't cache the result due to nullable's assignment not being thread safe - public DbType DbType - => NpgsqlDbType is null ? DbType.Object : GlobalTypeMapper.NpgsqlDbTypeToDbType(NpgsqlDbType.Value); - public string? DataTypeName { get; } - public Type[] ClrTypes { get; } -} diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs new file mode 100644 index 0000000000..ad98d1613f --- /dev/null +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -0,0 +1,143 @@ +using System; +using System.Collections.Concurrent; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) + where TPgTypeId : struct +{ + // Mostly used for parameter writing, 8ns + readonly ConcurrentDictionary _cacheByClrType = new(); + + // Used for reading, occasionally for parameter writing where a db type was given. + // 8ns, about 10ns total to scan an array with 6, 7 different clr types under one pg type + readonly ConcurrentDictionary _cacheByPgTypeId = new(); + + static TypeInfoCache() + { + if (typeof(TPgTypeId) != typeof(Oid) && typeof(TPgTypeId) != typeof(DataTypeName)) + throw new InvalidOperationException("Cannot use this type argument."); + } + + /// + /// + /// + /// + /// + /// + /// + public PgTypeInfo? GetOrAddInfo(Type? type, TPgTypeId? 
pgTypeId) + { + if (pgTypeId is { } id) + { + if (_cacheByPgTypeId.TryGetValue(id, out var infos)) + if (FindMatch(type, infos) is { } info) + return info; + + return AddEntryById(type, id, infos); + } + + if (type is not null) + return _cacheByClrType.TryGetValue(type, out var info) ? info : AddByType(type); + + return null; + + PgTypeInfo? FindMatch(Type? type, (Type? Type, PgTypeInfo Info)[] infos) + { + for (var i = 0; i < infos.Length; i++) + { + ref var item = ref infos[i]; + if (item.Type == type) + return item.Info; + } + + return null; + } + + PgTypeInfo? AddByType(Type type) + { + // We don't pass PgTypeId as we're interested in default converters here. + var info = CreateInfo(type, null, options, validatePgTypeIds); + + return info is null + ? null + : _cacheByClrType.TryAdd(type, info) // We never remove entries so either of these branches will always succeed. + ? info + : _cacheByClrType[type]; + } + + PgTypeInfo? AddEntryById(Type? type, TPgTypeId pgTypeId, (Type? Type, PgTypeInfo Info)[]? infos) + { + if (CreateInfo(type, pgTypeId, options, validatePgTypeIds) is not { } info) + return null; + + var isDefaultInfo = type is null; + if (infos is null) + { + // Also add defaults by their info type to save a future resolver lookup + resize. + infos = isDefaultInfo + ? new [] { (type, info), (info.Type, info) } + : new [] { (type, info) }; + + if (_cacheByPgTypeId.TryAdd(pgTypeId, infos)) + return info; + } + + // We have to update it instead. + while (true) + { + infos = _cacheByPgTypeId[pgTypeId]; + if (FindMatch(type, infos) is { } racedInfo) + return racedInfo; + + // Also add defaults by their info type to save a future resolver lookup + resize. + var oldInfos = infos; + var hasExactType = false; + if (isDefaultInfo) + { + foreach (var oldInfo in oldInfos) + if (oldInfo.Type == info.Type) + hasExactType = true; + } + Array.Resize(ref infos, oldInfos.Length + (isDefaultInfo && !hasExactType ? 
2 : 1)); + infos[oldInfos.Length] = (type, info); + if (isDefaultInfo && !hasExactType) + infos[oldInfos.Length + 1] = (info.Type, info); + + if (_cacheByPgTypeId.TryUpdate(pgTypeId, infos, oldInfos)) + return info; + } + } + + static PgTypeInfo? CreateInfo(Type? type, TPgTypeId? typeId, PgSerializerOptions options, bool validatePgTypeIds) + { + var pgTypeId = AsPgTypeId(typeId); + // Validate that we only pass data types that are supported by the backend. + var dataTypeName = pgTypeId is { } id ? (DataTypeName?)options.DatabaseInfo.GetDataTypeName(id, validate: validatePgTypeIds) : null; + var info = options.TypeInfoResolver.GetTypeInfo(type, dataTypeName, options); + if (info is null) + return null; + + if (pgTypeId is not null && info.PgTypeId != pgTypeId) + throw new InvalidOperationException("A Postgres type was passed but the resolved PgTypeInfo does not have an equal PgTypeId."); + + if (type is not null && info.Type != type) + { + // Types were not equal, throw for HasExactType = true, otherwise we throw when the returned type isn't assignable to the requested type. + if (info.HasExactType || !info.Type.IsAssignableTo(type)) + throw new InvalidOperationException($"A CLR type '{type}' was passed but the resolved PgTypeInfo does not have a compatible type: {info.Type}."); + } + + return info; + } + + static PgTypeId? AsPgTypeId(TPgTypeId? 
pgTypeId) + => pgTypeId switch + { + { } id when typeof(TPgTypeId) == typeof(DataTypeName) => new((DataTypeName)(object)id), + { } id => new((Oid)(object)id), + null => null + }; + } +} diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs new file mode 100644 index 0000000000..7f2346add0 --- /dev/null +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -0,0 +1,784 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Text; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal; + +/// +/// +/// +/// +/// +/// +/// Relevant for `PgProviderTypeInfo` only: whether the instance can be constructed without passing mapping.DataTypeName, an exception occurs otherwise. +/// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public delegate PgTypeInfo TypeInfoFactory(PgSerializerOptions options, TypeInfoMapping mapping, bool requiresDataTypeName); + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public enum MatchRequirement +{ + /// Match when the clr type and datatype name both match. + /// It's also the only requirement that participates in clr type fallback matching. + All, + /// Match when the datatype name or CLR type matches while the other also matches or is absent. + Single, + /// Match when the datatype name matches and the clr type also matches or is absent. + DataTypeName +} + +/// A factory for well-known PgConverters. 
+[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public static class PgConverterFactory +{ + public static PgConverter CreateArrayMultirangeConverter(PgConverter rangeConverter, PgSerializerOptions options) where T : notnull + => new MultirangeConverter(rangeConverter); + + public static PgConverter> CreateListMultirangeConverter(PgConverter rangeConverter, PgSerializerOptions options) where T : notnull + => new MultirangeConverter, T>(rangeConverter); + + public static PgConverter> CreateRangeConverter(PgConverter subTypeConverter, PgSerializerOptions options) + => new RangeConverter(subTypeConverter); + + public static PgConverter CreatePolymorphicArrayConverter(Func> arrayConverterFactory, Func> nullableArrayConverterFactory, PgSerializerOptions options) + => options.ArrayNullabilityMode switch + { + ArrayNullabilityMode.Never => arrayConverterFactory(), + ArrayNullabilityMode.Always => nullableArrayConverterFactory(), + ArrayNullabilityMode.PerInstance => new PolymorphicArrayConverter(arrayConverterFactory(), nullableArrayConverterFactory()), + _ => throw new ArgumentOutOfRangeException() + }; +} + +[DebuggerDisplay("{DebuggerDisplay,nq}")] +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public readonly struct TypeInfoMapping(Type type, string dataTypeName, TypeInfoFactory factory) +{ + // For objects it makes no sense to have clr type only matches by default, there are too many implementations. + + public TypeInfoFactory Factory { get; init; } = factory; + public Type Type { get; init; } = type; + public string DataTypeName { get; init; } = Postgres.DataTypeName.NormalizeName(dataTypeName); + + public MatchRequirement MatchRequirement { get; init; } = type == typeof(object) ? MatchRequirement.DataTypeName : MatchRequirement.All; + public Func? TypeMatchPredicate { get; init; } + + public bool TypeEquals(Type type) => TypeMatchPredicate?.Invoke(type) ?? 
Type == type; + + bool DataTypeNameEqualsCore(string dataTypeName) + { + var span = DataTypeName.AsSpan(); + return Postgres.DataTypeName.IsFullyQualified(span) + ? span.Equals(dataTypeName.AsSpan(), StringComparison.Ordinal) + : span.Equals(Postgres.DataTypeName.ValidatedName(dataTypeName).UnqualifiedNameSpan, StringComparison.Ordinal); + } + + internal bool DataTypeNameEquals(DataTypeName dataTypeName) + { + var value = dataTypeName.Value; + return DataTypeNameEqualsCore(value); + } + + public bool DataTypeNameEquals(string dataTypeName) + { + var normalized = Postgres.DataTypeName.NormalizeName(dataTypeName); + return DataTypeNameEqualsCore(normalized); + } + + string DebuggerDisplay + { + get + { + var builder = new StringBuilder() + .Append(Type.Name) + .Append(" <-> ") + .Append(Postgres.DataTypeName.FromDisplayName(DataTypeName).DisplayName); + + if (MatchRequirement is not MatchRequirement.All) + builder.Append($" ({MatchRequirement.ToString().ToLowerInvariant()})"); + + return builder.ToString(); + } + } +} + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public sealed class TypeInfoMappingCollection +{ + readonly TypeInfoMappingCollection? _baseCollection; + readonly List _items; + + public TypeInfoMappingCollection(int capacity = 0) + => _items = new(capacity); + + public TypeInfoMappingCollection() : this(0) { } + + // Not used for resolving, only for composing (arrays that need to find the element mapping etc). + public TypeInfoMappingCollection(TypeInfoMappingCollection baseCollection) : this(0) + => _baseCollection = baseCollection; + + public TypeInfoMappingCollection(IEnumerable items) + => _items = [..items]; + + public IReadOnlyList Items => _items; + + /// Returns the first default converter or the first converter that matches both type and dataTypeName. + /// If just a type was passed and no default was found we return the first converter with a type match. + public PgTypeInfo? Find(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + TypeInfoMapping? fallback = null; + foreach (var mapping in _items) + { + var looseTypeMatch = mapping.TypeMatchPredicate is { } pred ? pred(type) : type is null || mapping.Type == type; + var typeMatch = type is not null && looseTypeMatch; + var dataTypeMatch = dataTypeName is not null && mapping.DataTypeNameEquals(dataTypeName.Value); + + var matchRequirement = mapping.MatchRequirement; + if (dataTypeMatch && typeMatch + || matchRequirement is not MatchRequirement.All && dataTypeMatch && looseTypeMatch + || matchRequirement is MatchRequirement.Single && dataTypeName is null && typeMatch) + { + var resolvedDataTypeName = ResolveFullyQualifiedDataTypeName(dataTypeName, mapping.DataTypeName, options); + return mapping.Factory(options, mapping with { Type = type ?? mapping.Type, DataTypeName = resolvedDataTypeName }, dataTypeName is not null); + } + + // DataTypeName is explicitly requiring dataTypeName so it won't be used for a fallback, Single would have matched above already. + if (matchRequirement is MatchRequirement.All && fallback is null && dataTypeName is null && typeMatch) + fallback = mapping; + } + + if (fallback is { } fbMapping) + { + Debug.Assert(type is not null); + var resolvedDataTypeName = ResolveFullyQualifiedDataTypeName(dataTypeName, fbMapping.DataTypeName, options); + return fbMapping.Factory(options, fbMapping with { Type = type, DataTypeName = resolvedDataTypeName }, dataTypeName is not null); + } + + return null; + + static string ResolveFullyQualifiedDataTypeName(DataTypeName? dataTypeName, string mappingDataTypeName, PgSerializerOptions options) + { + // Make sure plugins (which match on unqualified names) and type info providers get the fully qualified name to canonicalize. 
+ if (dataTypeName is not null) + return dataTypeName.GetValueOrDefault().Value; + + if (TypeInfoMappingHelpers.TryResolveFullyQualifiedName(options, mappingDataTypeName, out var fqDataTypeName)) + return fqDataTypeName.Value; + + throw new NotSupportedException($"Cannot resolve '{mappingDataTypeName}' to a fully qualified datatype name. The datatype was not found in the current database info."); + } + } + + bool TryGetMapping(Type type, string dataTypeName, out TypeInfoMapping value) + { + foreach (var mapping in _baseCollection?._items ?? _items) + { + // During mapping we just use look for the declared type, regardless of TypeMatchPredicate. + if (mapping.Type == type && mapping.DataTypeNameEquals(dataTypeName)) + { + value = mapping; + return true; + } + } + + value = default; + return false; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + TypeInfoMapping GetMapping(Type type, string dataTypeName) + => TryGetMapping(type, dataTypeName, out var info) ? info : throw new InvalidOperationException($"Could not find mapping for {type} <-> {dataTypeName}"); + + // Helper to eliminate generic display class duplication. + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool? supportsReading = null, bool? supportsWriting = null) + => (options, mapping, requiresDataTypeName) => + { + var resolvedInnerMapping = innerMapping; + if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) + resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; + + var innerConcrete = (PgConcreteTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); + var converter = mapper(mapping, innerConcrete); + var preferredFormat = copyPreferredFormat ? innerConcrete.PreferredFormat : null; + var readingSupported = innerConcrete.SupportsReading + && (supportsReading ?? 
PgConcreteTypeInfo.GetDefaultSupportsReading(converter.TypeToConvert, requestedType: mapping.Type)); + var writingSupported = innerConcrete.SupportsWriting && (supportsWriting ?? true); + + return new PgConcreteTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), requestedType: mapping.Type) + { + PreferredFormat = preferredFormat, + SupportsReading = readingSupported, + SupportsWriting = writingSupported + }; + }; + + // Helper to eliminate generic display class duplication. + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool? supportsReading = null, bool? supportsWriting = null) + => (options, mapping, requiresDataTypeName) => + { + var resolvedInnerMapping = innerMapping; + if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) + resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; + + var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); + + var providerInfo = (PgProviderTypeInfo)innerInfo; + var typeInfoProvider = mapper(mapping, providerInfo); + // We include the data type name if the inner info did so as well. + // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. + PgTypeId? pgTypeId = innerInfo.PgTypeId is not null + ? options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)) + : null; + return new PgProviderTypeInfo(options, typeInfoProvider, pgTypeId, requestedType: mapping.Type); + }; + + public void Add(TypeInfoMapping mapping) => _items.Add(mapping); + + public void AddRange(TypeInfoMappingCollection collection) => _items.AddRange(collection._items); + + Func GetDefaultConfigure(bool isDefault) + => GetDefaultConfigure(isDefault ? 
MatchRequirement.Single : MatchRequirement.All); + Func GetDefaultConfigure(MatchRequirement matchRequirement) + => matchRequirement switch + { + MatchRequirement.All => static mapping => mapping with { MatchRequirement = MatchRequirement.All }, + MatchRequirement.DataTypeName => static mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }, + MatchRequirement.Single => static mapping => mapping with { MatchRequirement = MatchRequirement.Single }, + _ => throw new ArgumentOutOfRangeException(nameof(matchRequirement), matchRequirement, null) + }; + + Func GetArrayTypeMatchPredicate(Func elementTypeMatchPredicate) + => type => type is null ? elementTypeMatchPredicate(null) : type.IsArray && elementTypeMatchPredicate(type.GetElementType()!); + Func GetListTypeMatchPredicate(Func elementTypeMatchPredicate) + => type => type is null ? elementTypeMatchPredicate(null) + // We anti-constrain on IsArray to avoid matching byte/sbyte, short/ushort int/uint + // with the list mapping of the earlier type when an exact match is probably available. + : !type.IsArray && typeof(IList).IsAssignableFrom(type) && elementTypeMatchPredicate(typeof(TElement)); + + public void AddType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class + => AddType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); + + public void AddType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class + => AddType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); + + public void AddType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class + { + var mapping = new TypeInfoMapping(typeof(T), dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? 
mapping; + if (typeof(T) != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(typeof(T), mapping, static (_, info) => ((PgConcreteTypeInfo)info).Converter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); + _items.Add(mapping); + } + + public void AddProviderType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class + => AddProviderType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); + + public void AddProviderType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class + => AddProviderType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); + + public void AddProviderType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class + { + var mapping = new TypeInfoMapping(typeof(T), dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? 
mapping; + if (typeof(T) != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(typeof(T), mapping, static (_, info) => PgProviderTypeInfo.GetProvider(info), copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); + _items.Add(mapping); + } + + + public void AddArrayType(string elementDataTypeName) where TElement : class + => AddArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + + public void AddArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class + => AddArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); + + public void AddArrayType(TypeInfoMapping elementMapping) where TElement : class + => AddArrayType(elementMapping, suppressObjectMapping: false); + + public void AddArrayType(TypeInfoMapping elementMapping, bool suppressObjectMapping) where TElement : class + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); + AddArrayType(elementMapping, typeof(IList), CreateListBasedConverter, listTypeMatchPredicate, suppressObjectMapping: true); + + void AddArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) + { + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + _items.Add(arrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => + { + if (!requiresDataTypeName) + throw new InvalidOperationException("Should not happen, please file a bug."); + + return arrayMapping.Factory(options, mapping, requiresDataTypeName); + })); + } + } + + public void AddProviderArrayType(string elementDataTypeName) where TElement : class + => AddProviderArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + + public void AddProviderArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class + => AddProviderArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); + + public void AddProviderArrayType(TypeInfoMapping elementMapping) where TElement : class + => AddProviderArrayType(elementMapping, suppressObjectMapping: false); + + public void AddProviderArrayType(TypeInfoMapping elementMapping, bool suppressObjectMapping) where TElement : class + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? 
(static type => type is null || type == typeof(TElement))); + + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + AddProviderArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedTypeInfoProvider, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); + AddProviderArrayType(elementMapping, typeof(IList), CreateListBasedTypeInfoProvider, listTypeMatchPredicate, suppressObjectMapping: true); + + void AddProviderArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate = null, bool suppressObjectMapping = false) + { + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + _items.Add(arrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => + { + if (!requiresDataTypeName) + throw new InvalidOperationException("Should not happen, please file a bug."); + + return arrayMapping.Factory(options, mapping, requiresDataTypeName); + })); + } + } + + public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct + => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverter((PgConverter)((PgConcreteTypeInfo)innerInfo).Converter), GetDefaultConfigure(isDefault)); + + public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct + => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, 
+ static (_, innerInfo) => new NullableConverter((PgConverter)((PgConcreteTypeInfo)innerInfo).Converter), GetDefaultConfigure(matchRequirement)); + + public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct + => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverter((PgConverter)((PgConcreteTypeInfo)innerInfo).Converter), configure); + + // Lives outside to prevent capture of T. + void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, + Func nullableConverter, Func? configure) + { + var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? mapping; + if (type != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(type, mapping, static (_, info) => ((PgConcreteTypeInfo)info).Converter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); + _items.Add(mapping); + _items.Add(new TypeInfoMapping(nullableType, dataTypeName, + CreateComposedFactory(nullableType, mapping, nullableConverter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement, + TypeMatchPredicate = mapping.TypeMatchPredicate is not null + ? matchType => matchType is null + ? 
mapping.TypeMatchPredicate(null) + : matchType == nullableType && mapping.TypeMatchPredicate(type) + : null + }); + } + + public void AddStructArrayType(string elementDataTypeName) where TElement : struct + => AddStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); + + public void AddStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct + => AddStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); + + public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct + => AddStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); + + public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, bool suppressObjectMapping) where TElement : struct + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => + type is null || type == typeof(TElement?))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? 
(static type => + type is null || type == typeof(TElement?))); + + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + AddStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), + CreateArrayBasedConverter, CreateArrayBasedConverter, + arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); + + // Don't add the object converter for the list based converter. + AddStructArrayType(elementMapping, nullableElementMapping, typeof(IList), typeof(IList), + CreateListBasedConverter, CreateListBasedConverter, + listTypeMatchPredicate, nullableListTypeMatchPredicate, suppressObjectMapping: true); + } + + // Lives outside to prevent capture of TElement. + void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, + Func converter, Func nullableConverter, + Func? typeMatchPredicate, Func? 
nullableTypeMatchPredicate, bool suppressObjectMapping) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter, supportsReading: true)) + { + MatchRequirement = arrayMapping.MatchRequirement, + TypeMatchPredicate = nullableTypeMatchPredicate + }; + + _items.Add(arrayMapping); + _items.Add(nullableArrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => + { + return options.ArrayNullabilityMode switch + { + _ when !requiresDataTypeName => throw new InvalidOperationException("Should not happen, please file a bug."), + ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, requiresDataTypeName), + ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), + ArrayNullabilityMode.PerInstance => CreateComposedPerInstance( + arrayMapping.Factory(options, mapping, requiresDataTypeName), + nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), + mapping.DataTypeName + ), + _ => throw new ArgumentOutOfRangeException() + }; + }) { MatchRequirement = MatchRequirement.DataTypeName }); + + PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerInfo, PgTypeInfo nullableInnerInfo, string dataTypeName) + { + var converter = + new PolymorphicArrayConverter( + 
(PgConverter)((PgConcreteTypeInfo)innerInfo).Converter, + (PgConverter)((PgConcreteTypeInfo)nullableInnerInfo).Converter); + + return new PgConcreteTypeInfo(innerInfo.Options, converter, + innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), requestedType: typeof(object)) { SupportsWriting = false }; + } + } + + public void AddProviderStructType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct + => AddProviderStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableTypeInfoProvider(innerInfo), GetDefaultConfigure(isDefault)); + + public void AddProviderStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct + => AddProviderStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableTypeInfoProvider(innerInfo), GetDefaultConfigure(matchRequirement)); + + public void AddProviderStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct + => AddProviderStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableTypeInfoProvider(innerInfo), configure); + + // Lives outside to prevent capture of T. + void AddProviderStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, + Func nullableConverter, Func? configure) + { + var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? 
mapping; + if (type != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(type, mapping, static (_, info) => PgProviderTypeInfo.GetProvider(info), copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); + _items.Add(mapping); + _items.Add(new TypeInfoMapping(nullableType, dataTypeName, + CreateComposedFactory(nullableType, mapping, nullableConverter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement, + TypeMatchPredicate = mapping.TypeMatchPredicate is not null + ? matchType => matchType is null + ? mapping.TypeMatchPredicate(null) + : matchType == nullableType && mapping.TypeMatchPredicate(type) + : null + }); + } + + public void AddProviderStructArrayType(string elementDataTypeName) where TElement : struct + => AddProviderStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); + + public void AddProviderStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct + => AddProviderStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); + + public void AddProviderStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct + => AddProviderStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); + + public void AddProviderStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, bool suppressObjectMapping) where TElement : struct + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? 
(static type => type is null || type == typeof(TElement))); + var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => + type is null || type == typeof(TElement?))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => + type is null || type == typeof(TElement?))); + + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + AddProviderStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), + CreateArrayBasedTypeInfoProvider, + CreateArrayBasedTypeInfoProvider, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); + + // Don't add the object converter for the list based converter. + AddProviderStructArrayType(elementMapping, nullableElementMapping, typeof(IList), typeof(IList), + CreateListBasedTypeInfoProvider, + CreateListBasedTypeInfoProvider, suppressObjectMapping: true, listTypeMatchPredicate, nullableListTypeMatchPredicate); + } + + // Lives outside to prevent capture of TElement. + void AddProviderStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, + Func converter, Func nullableConverter, + bool suppressObjectMapping, Func? typeMatchPredicate, Func? 
nullableTypeMatchPredicate) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter, supportsReading: true)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = nullableTypeMatchPredicate + }; + + _items.Add(arrayMapping); + _items.Add(nullableArrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => options.ArrayNullabilityMode switch + { + _ when !requiresDataTypeName => throw new InvalidOperationException("Should not happen, please file a bug."), + ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, requiresDataTypeName), + ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), + ArrayNullabilityMode.PerInstance => CreateComposedPerInstance( + arrayMapping.Factory(options, mapping, requiresDataTypeName), + nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), + mapping.DataTypeName + ), + _ => throw new ArgumentOutOfRangeException() + }) { MatchRequirement = MatchRequirement.DataTypeName }); + + PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerInfo, PgTypeInfo nullableInnerInfo, string dataTypeName) + { + var provider = + new PolymorphicArrayTypeInfoProvider((PgProviderTypeInfo)innerInfo, + (PgProviderTypeInfo)nullableInnerInfo); + + return new 
PgProviderTypeInfo(innerInfo.Options, provider, + innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), requestedType: typeof(object)); + } + } + + public void AddPolymorphicProviderArrayType(string elementDataTypeName, Func> elementToArrayConverterFactory) + => AddPolymorphicProviderArrayType(GetMapping(typeof(object), elementDataTypeName), elementToArrayConverterFactory); + + public void AddPolymorphicProviderArrayType(TypeInfoMapping elementMapping, Func> elementToArrayConverterFactory) + { + AddPolymorphicProviderArrayType(elementMapping, typeof(object), + (mapping, elementInfo) => new PolymorphicArrayTypeInfoProvider( + elementInfo.Options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), elementInfo, elementToArrayConverterFactory(elementInfo.Options)) + , null); + + void AddPolymorphicProviderArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + var mapping = new TypeInfoMapping(type, arrayDataTypeName, + CreateComposedFactory(typeof(Array), elementMapping, converter, supportsReading: true, supportsWriting: false)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + _items.Add(mapping); + } + } + + /// Returns whether type matches any of the types we register pg arrays as. + [UnconditionalSuppressMessage("Trimming", "IL2070", + Justification = "Checking for IList implementing types requires interface list enumeration which isn't compatible with trimming. " + + "However as long as a concrete IList is rooted somewhere in the app, for instance through an `AddArrayType(...)` mapping, every implementation must keep it.")] + // We care about IList implementations if the instantiation is actually rooted by us through an Array mapping. + // Dynamic resolvers are a notable counterexample, but they are all correctly marked with RequiresUnreferencedCode. 
+ public static bool IsArrayLikeType(Type type, [NotNullWhen(true)] out Type? elementType) + { + if (type.GetElementType() is { } t) + { + elementType = t; + return true; + } + + if (type.IsConstructedGenericType && type.GetGenericTypeDefinition() is var def && (def == typeof(List<>) || def == typeof(IList<>))) + { + elementType = type.GetGenericArguments()[0]; + return true; + } + + foreach (var inf in type.GetInterfaces()) + { + if (inf.IsConstructedGenericType && inf.GetGenericTypeDefinition() == typeof(IList<>)) + { + elementType = inf.GetGenericArguments()[0]; + return true; + } + } + + elementType = null; + return false; + } + + static string GetArrayDataTypeName(string dataTypeName) + => DataTypeName.IsFullyQualified(dataTypeName.AsSpan()) + ? DataTypeName.ValidatedName(dataTypeName).ToArrayName().Value + : "_" + DataTypeName.FromDisplayName(dataTypeName).UnqualifiedName; + + static ArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + { + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: false); + + return ArrayConverter.CreateArrayBased((PgConcreteTypeInfo)elemInfo, mapping.Type); + } + + static ArrayConverter> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + { + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: false); + + return ArrayConverter>.CreateListBased((PgConcreteTypeInfo)elemInfo); + } + + static ArrayTypeInfoProvider CreateArrayBasedTypeInfoProvider(TypeInfoMapping mapping, PgProviderTypeInfo elemInfo) + { + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: true); + + return new ArrayTypeInfoProvider(elemInfo, mapping.Type); + } + + static ArrayTypeInfoProvider, TElement> CreateListBasedTypeInfoProvider(TypeInfoMapping mapping, PgProviderTypeInfo elemInfo) + { + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: true); + + return new ArrayTypeInfoProvider, TElement>(elemInfo, mapping.Type); + } + + [DoesNotReturn] + static void 
ThrowRequiresExactType(bool provider) + => throw new InvalidOperationException($"An exact-type info is required here; manually construct a mapping over a casting converter{(provider ? " type info provider" : "")} instead."); +} + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public static class TypeInfoMappingHelpers +{ + internal static bool TryResolveFullyQualifiedName(PgSerializerOptions options, string dataTypeName, out DataTypeName fqDataTypeName) + { + if (DataTypeName.IsFullyQualified(dataTypeName.AsSpan())) + { + fqDataTypeName = new DataTypeName(dataTypeName); + return true; + } + + if (options.DatabaseInfo.TryGetPostgresTypeByName(dataTypeName, out var pgType)) + { + fqDataTypeName = pgType.DataTypeName; + return true; + } + + fqDataTypeName = default; + return false; + } + + internal static PostgresType GetPgType(this TypeInfoMapping mapping, PgSerializerOptions options) + => options.DatabaseInfo.GetPostgresType(new DataTypeName(mapping.DataTypeName)); + + // NOTE: This method exists since 9.0 to be able to deprecate the method below that has optional arguments in 10.0 (potentially removing it directly or in 11.0). + // It reduces how binary breaking that change will be if this method would not be there to be picked for the most common invocations. + /// + /// Creates a PgTypeInfo from a mapping, optins, and a converter. + /// + /// The mapping to create an info for. + /// The options to use. + /// The converter to create a PgTypeInfo for. + /// The created info instance. + public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter) + => new PgConcreteTypeInfo(options, converter, new DataTypeName(mapping.DataTypeName)) + { + PreferredFormat = null, + SupportsWriting = true + }; + + /// + /// Creates a PgTypeInfo from a mapping, options, and a converter. + /// + /// The mapping to create an info for. + /// The options to use. + /// The converter to create a PgTypeInfo for. 
+ /// Whether to prefer a specific data format for this info, when null it defaults to the most suitable format. + /// Whether the converters returned from the given provider support writing. + /// The created info instance. + public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter, DataFormat? preferredFormat = null, bool supportsWriting = true) + => new PgConcreteTypeInfo(options, converter, new DataTypeName(mapping.DataTypeName)) + { + PreferredFormat = preferredFormat, + SupportsWriting = supportsWriting + }; + + // NOTE: This method exists since 9.0 to be able to deprecate the method below that has optional arguments in 10.0 (potentially removing it directly or in 11.0). + // It reduces how binary breaking that change will be if this method would not be there to be picked for the most common invocations. + /// + /// Creates a PgProviderTypeInfo from a mapping, options, and a provider. + /// + /// The mapping to create an info for. + /// The options to use. + /// The provider to create a PgProviderTypeInfo for. + /// Whether to pass mapping.DataTypeName to the PgProviderTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). + /// The created info instance. + public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConcreteTypeInfoProvider provider, bool includeDataTypeName) + { + PgTypeId? pgTypeId = includeDataTypeName ? 
new PgTypeId(new DataTypeName(mapping.DataTypeName)) : null; + return new PgProviderTypeInfo(options, provider, pgTypeId); + } +} diff --git a/src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs b/src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs deleted file mode 100644 index aedc14e743..0000000000 --- a/src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeMapping; - -public interface IUserTypeMapping -{ - public string PgTypeName { get; } - public Type ClrType { get; } - - public NpgsqlTypeHandler CreateHandler(PostgresType pgType, NpgsqlConnector connector); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs b/src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs deleted file mode 100644 index 75d680b200..0000000000 --- a/src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs +++ /dev/null @@ -1,24 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandlers.CompositeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeMapping; - -public interface IUserCompositeTypeMapping : IUserTypeMapping -{ - INpgsqlNameTranslator NameTranslator { get; } -} - -sealed class UserCompositeTypeMapping : IUserCompositeTypeMapping -{ - public string PgTypeName { get; } - public Type ClrType => typeof(T); - public INpgsqlNameTranslator NameTranslator { get; } - - public UserCompositeTypeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - => (PgTypeName, NameTranslator) = (pgTypeName, nameTranslator); - - public NpgsqlTypeHandler CreateHandler(PostgresType pgType, NpgsqlConnector connector) - => new CompositeHandler((PostgresCompositeType)pgType, connector.TypeMapper, NameTranslator); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs 
b/src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs deleted file mode 100644 index 9c2c3e35d7..0000000000 --- a/src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs +++ /dev/null @@ -1,46 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Reflection; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeMapping; - -public interface IUserEnumTypeMapping : IUserTypeMapping -{ - INpgsqlNameTranslator NameTranslator { get; } -} - -sealed class UserEnumTypeMapping : IUserEnumTypeMapping - where TEnum : struct, Enum -{ - public string PgTypeName { get; } - public Type ClrType => typeof(TEnum); - public INpgsqlNameTranslator NameTranslator { get; } - - readonly Dictionary _enumToLabel = new(); - readonly Dictionary _labelToEnum = new(); - - public UserEnumTypeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - { - (PgTypeName, NameTranslator) = (pgTypeName, nameTranslator); - - foreach (var field in typeof(TEnum).GetFields(BindingFlags.Static | BindingFlags.Public)) - { - var attribute = (PgNameAttribute?)field.GetCustomAttributes(typeof(PgNameAttribute), false).FirstOrDefault(); - var enumName = attribute is null - ? 
nameTranslator.TranslateMemberName(field.Name) - : attribute.PgName; - var enumValue = (TEnum)field.GetValue(null)!; - - _enumToLabel[enumValue] = enumName; - _labelToEnum[enumName] = enumValue; - } - } - - public NpgsqlTypeHandler CreateHandler(PostgresType postgresType, NpgsqlConnector connector) - => new EnumHandler((PostgresEnumType)postgresType, _enumToLabel, _labelToEnum); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/ValueMetadata.cs b/src/Npgsql/Internal/ValueMetadata.cs new file mode 100644 index 0000000000..b71028c4a1 --- /dev/null +++ b/src/Npgsql/Internal/ValueMetadata.cs @@ -0,0 +1,12 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public readonly struct ValueMetadata +{ + public required DataFormat Format { get; init; } + public required Size BufferRequirement { get; init; } + public required Size Size { get; init; } + public object? WriteState { get; init; } +} diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index e2342775dd..5c1234bc74 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -1,7 +1,6 @@ -using System; +using System; using System.Diagnostics; using System.IO; -using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -12,25 +11,22 @@ namespace Npgsql; /// Launches MIT Kerberos klist and parses out the default principal from it. /// Caches the result. /// -sealed class KerberosUsernameProvider +static class KerberosUsernameProvider { - static bool _performedDetection; + static volatile bool _performedDetection; static string? _principalWithRealm; static string? 
_principalWithoutRealm; -#pragma warning disable CS1998 - internal static async ValueTask GetUsernameAsync(bool includeRealm, ILogger connectionLogger, bool async, CancellationToken cancellationToken) -#pragma warning restore CS1998 + internal static ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) { if (_performedDetection) - return includeRealm ? _principalWithRealm : _principalWithoutRealm; + return new(includeRealm ? _principalWithRealm : _principalWithoutRealm); var klistPath = FindInPath("klist"); if (klistPath == null) { connectionLogger.LogDebug("klist not found in PATH, skipping Kerberos username detection"); - return null; + return new((string?)null); } - var processStartInfo = new ProcessStartInfo { FileName = klistPath, @@ -38,46 +34,41 @@ sealed class KerberosUsernameProvider RedirectStandardError = true, UseShellExecute = false }; + var process = Process.Start(processStartInfo); if (process is null) { connectionLogger.LogDebug("klist process could not be started"); - return null; + return new((string?)null); } -#if NET5_0_OR_GREATER - if (async) - await process.WaitForExitAsync(cancellationToken); - else - // ReSharper disable once MethodHasAsyncOverloadWithCancellation - process.WaitForExit(); -#else - // ReSharper disable once MethodHasAsyncOverload - process.WaitForExit(); -#endif + return GetUsernameAsyncInternal(); - if (process.ExitCode != 0) + async ValueTask GetUsernameAsyncInternal() { - connectionLogger.LogDebug($"klist exited with code {process.ExitCode}: {process.StandardError.ReadToEnd()}"); - return null; - } + if (async) + await process.WaitForExitAsync(cancellationToken).ConfigureAwait(false); + else + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + process.WaitForExit(); - var line = default(string); - for (var i = 0; i < 2; i++) - // ReSharper disable once MethodHasAsyncOverload -#if NET7_0_OR_GREATER - if ((line = async ? 
await process.StandardOutput.ReadLineAsync(cancellationToken) : process.StandardOutput.ReadLine()) == null) -#elif NET5_0_OR_GREATER - if ((line = async ? await process.StandardOutput.ReadLineAsync() : process.StandardOutput.ReadLine()) == null) -#else - if ((line = process.StandardOutput.ReadLine()) == null) -#endif + if (process.ExitCode != 0) { - connectionLogger.LogDebug("Unexpected output from klist, aborting Kerberos username detection"); + connectionLogger.LogDebug($"klist exited with code {process.ExitCode}: {process.StandardError.ReadToEnd()}"); return null; } - return ParseKListOutput(line!, includeRealm, connectionLogger); + var line = default(string); + for (var i = 0; i < 2; i++) + // ReSharper disable once MethodHasAsyncOverload + if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken).ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) + { + connectionLogger.LogDebug("Unexpected output from klist, aborting Kerberos username detection"); + return null; + } + + return ParseKListOutput(line!, includeRealm, connectionLogger); + } } static string? ParseKListOutput(string line, bool includeRealm, ILogger connectionLogger) @@ -107,8 +98,15 @@ sealed class KerberosUsernameProvider return includeRealm ? _principalWithRealm : _principalWithoutRealm; } - static string? FindInPath(string name) => Environment.GetEnvironmentVariable("PATH") - ?.Split(Path.PathSeparator) - .Select(p => Path.Combine(p, name)) - .FirstOrDefault(File.Exists); + static string? FindInPath(string name) + { + foreach (var p in Environment.GetEnvironmentVariable("PATH")?.Split(Path.PathSeparator) ?? 
[]) + { + var path = Path.Combine(p, name); + if (File.Exists(path)) + return path; + } + + return null; + } } diff --git a/src/Npgsql/LogMessages.cs b/src/Npgsql/LogMessages.cs index 8d5f471c27..757f972764 100644 --- a/src/Npgsql/LogMessages.cs +++ b/src/Npgsql/LogMessages.cs @@ -26,12 +26,6 @@ static partial class LogMessages Message = "Opened connection to {Host}:{Port}/{Database}")] internal static partial void OpenedConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString, int ConnectorId); - [LoggerMessage( - EventId = NpgsqlEventId.OpenedConnection, - Level = LogLevel.Debug, - Message = "Opened multiplexing connection to {Host}:{Port}/{Database}")] - internal static partial void OpenedMultiplexingConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString); - [LoggerMessage( EventId = NpgsqlEventId.ClosingConnection, Level = LogLevel.Trace, @@ -44,12 +38,6 @@ static partial class LogMessages Message = "Closed connection to {Host}:{Port}/{Database}")] internal static partial void ClosedConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString, int ConnectorId); - [LoggerMessage( - EventId = NpgsqlEventId.ClosedConnection, - Level = LogLevel.Debug, - Message = "Closed multiplexing connection to {Host}:{Port}/{Database}")] - internal static partial void ClosedMultiplexingConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString); - [LoggerMessage( EventId = NpgsqlEventId.OpeningPhysicalConnection, Level = LogLevel.Trace, @@ -134,12 +122,6 @@ static partial class LogMessages Message = "Exception while closing connector")] internal static partial void ExceptionWhenClosingPhysicalConnection(ILogger logger, int ConnectorId, Exception exception); - [LoggerMessage( - EventId = NpgsqlEventId.ExceptionWhenOpeningConnectionForMultiplexing, - Level = LogLevel.Error, - Message = "Exception opening a connection for multiplexing")] - internal 
static partial void ExceptionWhenOpeningConnectionForMultiplexing(ILogger logger, Exception exception); - [LoggerMessage( Level = LogLevel.Trace, Message = "Start user action")] @@ -180,7 +162,7 @@ static partial class LogMessages Level = LogLevel.Debug, Message = "Executing batch: {BatchCommands}", SkipEnabledCheck = true)] - internal static partial void ExecutingBatchWithParameters(ILogger logger, (string CommandText, object[] Parameters)[] BatchCommands, int ConnectorId); + internal static partial void ExecutingBatchWithParameters(ILogger logger, (string CommandText, IEnumerable Parameters)[] BatchCommands, int ConnectorId); [LoggerMessage( EventId = NpgsqlEventId.CommandExecutionCompleted, @@ -209,7 +191,7 @@ static partial class LogMessages Message = "Batch execution completed (duration={DurationMs}ms): {BatchCommands}", SkipEnabledCheck = true)] internal static partial void BatchExecutionCompletedWithParameters( - ILogger logger, (string CommandText, object[] Parameters)[] BatchCommands, long DurationMs, int ConnectorId); + ILogger logger, (string CommandText, IEnumerable Parameters)[] BatchCommands, long DurationMs, int ConnectorId); [LoggerMessage( EventId = NpgsqlEventId.CancellingCommand, @@ -254,12 +236,6 @@ internal static partial void BatchExecutionCompletedWithParameters( Message = "Deriving Parameters for query: {CommandText}")] internal static partial void DerivingParameters(ILogger logger, string CommandText, int ConnectorId); - [LoggerMessage( - EventId = NpgsqlEventId.ExceptionWhenWritingMultiplexedCommands, - Level = LogLevel.Error, - Message = "Exception while writing multiplexed commands")] - internal static partial void ExceptionWhenWritingMultiplexedCommands(ILogger logger, int ConnectorId, Exception exception); - [LoggerMessage( Level = LogLevel.Trace, Message = "Cleaning up reader")] diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs new file mode 100644 index 0000000000..431c0ea734 --- /dev/null +++ 
b/src/Npgsql/MetricsReporter.cs @@ -0,0 +1,260 @@ +namespace Npgsql; + +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.Metrics; +using System.Runtime.InteropServices; +using System.Threading; + +// .NET docs on metric instrumentation: https://learn.microsoft.com/en-us/dotnet/core/diagnostics/metrics-instrumentation +// OpenTelemetry semantic conventions for database metric: https://opentelemetry.io/docs/specs/semconv/database/database-metrics +sealed class MetricsReporter : IDisposable +{ + const string Version = "0.1.0"; + + static readonly Meter Meter; + + static readonly UpDownCounter CommandsExecuting; + static readonly Counter CommandsFailed; + static readonly Histogram CommandDuration; + + static readonly Counter BytesWritten; + static readonly Counter BytesRead; + + static readonly UpDownCounter PendingConnectionRequests; + static readonly Counter ConnectionTimeouts; + static readonly Histogram ConnectionCreateTime; + static readonly ObservableGauge PreparedRatio; + + readonly NpgsqlDataSource _dataSource; + + readonly KeyValuePair _poolNameTag; + readonly TagList _durationMetricTags; + + static readonly List Reporters = []; + + static readonly InstrumentAdvice ShortHistogramAdvice = new() + { + HistogramBucketBoundaries = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10] + }; + + CommandCounters _commandCounters; + + [StructLayout(LayoutKind.Explicit)] + struct CommandCounters + { + [FieldOffset(0)] internal int CommandsStarted; + [FieldOffset(4)] internal int PreparedCommandsStarted; + [FieldOffset(0)] internal long All; + } + + static MetricsReporter() + { + Meter = new("Npgsql", Version); + + // db.client.operation.duration is stable in the OpenTelemetry spec + CommandDuration = Meter.CreateHistogram( + "db.client.operation.duration", + unit: "s", + description: "Duration of database client operations.", + advice: ShortHistogramAdvice); + + // From here, metrics have "development" status (not stable) + 
Meter.CreateObservableUpDownCounter( + "db.client.connection.count", + GetConnectionCount, + unit: "{connection}", + description: "The number of connections that are currently in state described by the state attribute."); + + // It's a bit ridiculous to manage "max connections" as an observable counter, given that it never changes for a given pool. + // However, we can't simply report it once at startup, since clients who connect later wouldn't have it. And since reporting it + // repeatedly isn't possible because we need to provide incremental figures, we just manage it as an observable counter. + Meter.CreateObservableUpDownCounter( + "db.client.connection.max", + GetConnectionMax, + unit: "{connection}", + description: "The maximum number of open connections allowed."); + + // From here, metrics are entirely Npgsql-specific and not covered by the OpenTelemetry spec. + CommandsExecuting = Meter.CreateUpDownCounter( + "db.client.operation.npgsql.executing", + unit: "{command}", + description: "The number of currently executing database commands."); + + CommandsFailed = Meter.CreateCounter( + "db.client.operation.failed", + unit: "{command}", + description: "The number of database commands which have failed."); + + BytesWritten = Meter.CreateCounter( + "db.client.operation.npgsql.bytes_written", + unit: "By", + description: "The number of bytes written."); + + BytesRead = Meter.CreateCounter( + "db.client.operation.npgsql.bytes_read", + unit: "By", + description: "The number of bytes read."); + + PendingConnectionRequests = Meter.CreateUpDownCounter( + "db.client.connection.npgsql.pending_requests", + unit: "{request}", + description: "The number of pending requests for an open connection, cumulative for the entire pool."); + + ConnectionTimeouts = Meter.CreateCounter( + "db.client.connection.npgsql.timeouts", + unit: "{timeout}", + description: "The number of connection timeouts that have occurred trying to obtain a connection from the pool."); + + 
ConnectionCreateTime = Meter.CreateHistogram( + "db.client.connection.npgsql.create_time", + unit: "s", + description: "The time it took to create a new connection.", + advice: ShortHistogramAdvice); + + PreparedRatio = Meter.CreateObservableGauge( + "db.client.operation.npgsql.prepared_ratio", + GetPreparedCommandsRatio, + description: "The ratio of prepared command executions."); + } + + public MetricsReporter(NpgsqlDataSource dataSource) + { + _dataSource = dataSource; + _poolNameTag = new KeyValuePair("db.client.connection.pool.name", dataSource.Name); + + _durationMetricTags = new TagList + { + // TODO: Vary this for PG-like databases (e.g. CockroachDB)? + { "db.system.name", "postgresql" }, + { "db.client.connection.pool.name", _dataSource.Name }, + { "server.address", _dataSource.Settings.Host }, + { "server.port", _dataSource.Settings.Port } + }; + + lock (Reporters) + { + Reporters.Add(this); + Reporters.Sort((x,y) => string.Compare(x._dataSource.Name, y._dataSource.Name, StringComparison.Ordinal)); + } + } + + internal long ReportCommandStart() + { + CommandsExecuting.Add(1, _poolNameTag); + if (PreparedRatio.Enabled) + Interlocked.Increment(ref _commandCounters.CommandsStarted); + + return CommandDuration.Enabled ? 
Stopwatch.GetTimestamp() : 0; + } + + internal void ReportCommandStop(long startTimestamp) + { + CommandsExecuting.Add(-1, _poolNameTag); + + if (CommandDuration.Enabled && startTimestamp > 0) + { + CommandDuration.Record(Stopwatch.GetElapsedTime(startTimestamp).TotalSeconds, _durationMetricTags); + } + } + + internal void CommandStartPrepared() + { + if (PreparedRatio.Enabled) + Interlocked.Increment(ref _commandCounters.PreparedCommandsStarted); + } + + internal void ReportCommandFailed() => CommandsFailed.Add(1, _poolNameTag); + + internal void ReportBytesWritten(long bytesWritten) => BytesWritten.Add(bytesWritten, _poolNameTag); + internal void ReportBytesRead(long bytesRead) => BytesRead.Add(bytesRead, _poolNameTag); + + internal void ReportConnectionPoolTimeout() + => ConnectionTimeouts.Add(1, _poolNameTag); + + internal void ReportPendingConnectionRequestStart() + => PendingConnectionRequests.Add(1, _poolNameTag); + internal void ReportPendingConnectionRequestStop() + => PendingConnectionRequests.Add(-1, _poolNameTag); + + internal void ReportConnectionCreateTime(TimeSpan duration) + => ConnectionCreateTime.Record(duration.TotalSeconds, _poolNameTag); + + static IEnumerable> GetConnectionCount() + { + lock (Reporters) + { + var measurements = new List>(); + + for (var i = 0; i < Reporters.Count; i++) + { + var reporter = Reporters[i]; + + var connectionStats = reporter._dataSource.Statistics; + measurements.Add(new Measurement( + connectionStats.Idle, + reporter._poolNameTag, + new KeyValuePair("db.client.connection.state", "idle"))); + + measurements.Add(new Measurement( + connectionStats.Busy, + reporter._poolNameTag, + new KeyValuePair("db.client.connection.state", "used"))); + } + + return measurements; + } + } + + static IEnumerable> GetConnectionMax() + { + lock (Reporters) + { + var measurements = new List>(); + + foreach (var reporter in Reporters) + { + if (reporter._dataSource is PoolingDataSource poolingDataSource) + { + measurements.Add(new 
Measurement(poolingDataSource.MaxConnections, reporter._poolNameTag)); + } + } + + return measurements; + } + } + + static IEnumerable> GetPreparedCommandsRatio() + { + lock (Reporters) + { + var measurements = new List>(Reporters.Count); + + for (var i = 0; i < Reporters.Count; i++) + { + var reporter = Reporters[i]; + + var counters = new CommandCounters + { + All = Interlocked.Exchange(ref reporter._commandCounters.All, default) + }; + + var value = (double)counters.PreparedCommandsStarted / counters.CommandsStarted * 100; + + if (double.IsFinite(value)) + measurements.Add(new Measurement(value, reporter._poolNameTag)); + } + + return measurements; + } + } + + public void Dispose() + { + lock (Reporters) + { + Reporters.Remove(this); + } + } +} diff --git a/src/Npgsql/MultiHostDataSourceWrapper.cs b/src/Npgsql/MultiHostDataSourceWrapper.cs index 4dcded98cc..cc54e06b29 100644 --- a/src/Npgsql/MultiHostDataSourceWrapper.cs +++ b/src/Npgsql/MultiHostDataSourceWrapper.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Util; using System.Diagnostics.CodeAnalysis; using System.Threading; @@ -7,15 +7,14 @@ namespace Npgsql; -sealed class MultiHostDataSourceWrapper : NpgsqlDataSource +sealed class MultiHostDataSourceWrapper(NpgsqlMultiHostDataSource wrappedSource, TargetSessionAttributes targetSessionAttributes) + : NpgsqlDataSource(CloneSettingsForTargetSessionAttributes(wrappedSource.Settings, targetSessionAttributes), wrappedSource.Configuration, reportMetrics: false) { - internal override bool OwnsConnectors => false; + internal NpgsqlMultiHostDataSource WrappedSource { get; } = wrappedSource; - readonly NpgsqlMultiHostDataSource _wrappedSource; + internal override bool OwnsConnectors => false; - public MultiHostDataSourceWrapper(NpgsqlMultiHostDataSource source, TargetSessionAttributes targetSessionAttributes) - : base(CloneSettingsForTargetSessionAttributes(source.Settings, targetSessionAttributes), source.Configuration) - => 
_wrappedSource = source; + public override void Clear() => WrappedSource.Clear(); static NpgsqlConnectionStringBuilder CloneSettingsForTargetSessionAttributes( NpgsqlConnectionStringBuilder settings, @@ -26,23 +25,22 @@ static NpgsqlConnectionStringBuilder CloneSettingsForTargetSessionAttributes( return clonedSettings; } - internal override (int Total, int Idle, int Busy) Statistics => _wrappedSource.Statistics; + internal override (int Total, int Idle, int Busy) Statistics => WrappedSource.Statistics; - internal override void Clear() => _wrappedSource.Clear(); internal override ValueTask Get(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) - => _wrappedSource.Get(conn, timeout, async, cancellationToken); + => WrappedSource.Get(conn, timeout, async, cancellationToken); internal override bool TryGetIdleConnector([NotNullWhen(true)] out NpgsqlConnector? connector) => throw new NpgsqlException("Npgsql bug: trying to get an idle connector from " + nameof(MultiHostDataSourceWrapper)); internal override ValueTask OpenNewConnector(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) => throw new NpgsqlException("Npgsql bug: trying to open a new connector from " + nameof(MultiHostDataSourceWrapper)); internal override void Return(NpgsqlConnector connector) - => _wrappedSource.Return(connector); + => WrappedSource.Return(connector); internal override void AddPendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => _wrappedSource.AddPendingEnlistedConnector(connector, transaction); + => WrappedSource.AddPendingEnlistedConnector(connector, transaction); internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => _wrappedSource.TryRemovePendingEnlistedConnector(connector, transaction); + => WrappedSource.TryRemovePendingEnlistedConnector(connector, transaction); internal override bool 
TryRentEnlistedPending(Transaction transaction, NpgsqlConnection connection, [NotNullWhen(true)] out NpgsqlConnector? connector) - => _wrappedSource.TryRentEnlistedPending(transaction, connection, out connector); -} \ No newline at end of file + => WrappedSource.TryRentEnlistedPending(transaction, connection, out connector); +} diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs deleted file mode 100644 index 2eb1763c3c..0000000000 --- a/src/Npgsql/MultiplexingDataSource.cs +++ /dev/null @@ -1,370 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using System.Threading.Channels; -using System.Threading.Tasks; -using Microsoft.Extensions.Logging; -using Npgsql.Internal; -using Npgsql.Util; - -namespace Npgsql; - -sealed class MultiplexingDataSource : PoolingDataSource -{ - readonly ILogger _connectionLogger; - readonly ILogger _commandLogger; - - readonly bool _autoPrepare; - - internal volatile bool StartupCheckPerformed; - - readonly ChannelReader _multiplexCommandReader; - internal ChannelWriter MultiplexCommandWriter { get; } - - /// - /// When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before - /// flushing to the network. - /// - readonly int _writeCoalescingBufferThresholdBytes; - - // TODO: Make this configurable - const int MultiplexingCommandChannelBound = 4096; - - internal MultiplexingDataSource( - NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig, - NpgsqlMultiHostDataSource? 
parentPool = null) - : base(settings, dataSourceConfig, parentPool) - { - Debug.Assert(Settings.Multiplexing); - - // TODO: Validate multiplexing options are set only when Multiplexing is on - - _autoPrepare = settings.MaxAutoPrepare > 0; - - _writeCoalescingBufferThresholdBytes = Settings.WriteCoalescingBufferThresholdBytes; - - var multiplexCommandChannel = Channel.CreateBounded( - new BoundedChannelOptions(MultiplexingCommandChannelBound) - { - FullMode = BoundedChannelFullMode.Wait, - SingleReader = true - }); - _multiplexCommandReader = multiplexCommandChannel.Reader; - MultiplexCommandWriter = multiplexCommandChannel.Writer; - - _connectionLogger = dataSourceConfig.LoggingConfiguration.ConnectionLogger; - _commandLogger = dataSourceConfig.LoggingConfiguration.CommandLogger; - - // TODO: Think about cleanup for this, e.g. completing the channel at application shutdown and/or - // pool clearing - _ = Task.Run(MultiplexingWriteLoop, CancellationToken.None) - .ContinueWith(t => - { - // Note that we *must* observe the exception if the task is faulted. - _connectionLogger.LogError(t.Exception, "Exception in multiplexing write loop, this is an Npgsql bug, please file an issue."); - }, TaskContinuationOptions.OnlyOnFaulted); - } - - async Task MultiplexingWriteLoop() - { - // This method is async, but only ever yields when there are no pending commands in the command channel. - // No I/O should ever be performed asynchronously, as that would block further writing for the entire - // application; whenever I/O cannot complete immediately, we chain a callback with ContinueWith and move - // on to the next connector. - Debug.Assert(_multiplexCommandReader != null); - - var stats = new MultiplexingStats { Stopwatch = new Stopwatch() }; - - while (true) - { - NpgsqlConnector? connector; - - // Get a first command out. 
- if (!_multiplexCommandReader.TryRead(out var command)) - command = await _multiplexCommandReader.ReadAsync(); - - try - { - // First step is to get a connector on which to execute - var spinwait = new SpinWait(); - while (true) - { - if (TryGetIdleConnector(out connector)) - { - // See increment under over-capacity mode below - Interlocked.Increment(ref connector.CommandsInFlightCount); - break; - } - - connector = await OpenNewConnector( - command.Connection!, - new NpgsqlTimeout(TimeSpan.FromSeconds(Settings.Timeout)), - async: true, - CancellationToken.None); - - if (connector != null) - { - // Managed to created a new connector - connector.Connection = null; - - // See increment under over-capacity mode below - Interlocked.Increment(ref connector.CommandsInFlightCount); - - break; - } - - // There were no idle connectors and we're at max capacity, so we can't open a new one. - // Enter over-capacity mode - find an unlocked connector with the least currently in-flight - // commands and sent on it, even though there are already pending commands. - var minInFlight = int.MaxValue; - foreach (var c in Connectors) - { - if (c?.MultiplexAsyncWritingLock == 0 && c.CommandsInFlightCount < minInFlight) - { - minInFlight = c.CommandsInFlightCount; - connector = c; - } - } - - // There could be no writable connectors (all stuck in transaction or flushing). - if (connector == null) - { - // TODO: This is problematic - when absolutely all connectors are both busy *and* currently - // performing (async) I/O, this will spin-wait. - // We could call WaitAsync, but that would wait for an idle connector, whereas we want any - // writeable (non-writing) connector even if it has in-flight commands. Maybe something - // with better back-off. - // On the other hand, this is exactly *one* thread doing spin-wait, maybe not that bad. 
- spinwait.SpinOnce(); - continue; - } - - // We may be in a race condition with the connector read loop, which may be currently returning - // the connector to the Idle channel (because it has completed all commands). - // Increment the in-flight count to make sure the connector isn't returned as idle. - var newInFlight = Interlocked.Increment(ref connector.CommandsInFlightCount); - if (newInFlight == 1) - { - // The connector's in-flight was 0, so it was idle - abort over-capacity read - // and retry the normal flow. - Interlocked.Decrement(ref connector.CommandsInFlightCount); - spinwait.SpinOnce(); - continue; - } - - break; - } - } - catch (Exception exception) - { - LogMessages.ExceptionWhenOpeningConnectionForMultiplexing(_connectionLogger, exception); - - // Fail the first command in the channel as a way of bubbling the exception up to the user - command.ExecutionCompletion.SetException(exception); - - continue; - } - - // We now have a ready connector, and can start writing commands to it. - Debug.Assert(connector != null); - - try - { - stats.Reset(); - connector.FlagAsNotWritableForMultiplexing(); - command.TraceCommandStart(connector); - - // Read queued commands and write them to the connector's buffer, for as long as we're - // under our write threshold and timer delay. - // Note we already have one command we read above, and have already updated the connector's - // CommandsInFlightCount. Now write that command. - var writtenSynchronously = WriteCommand(connector, command, ref stats); - - while (connector.WriteBuffer.WritePosition < _writeCoalescingBufferThresholdBytes && - writtenSynchronously && - _multiplexCommandReader.TryRead(out command)) - { - Interlocked.Increment(ref connector.CommandsInFlightCount); - writtenSynchronously = WriteCommand(connector, command, ref stats); - } - - // If all commands were written synchronously (good path), complete the write here, flushing - // and updating statistics. 
If not, CompleteRewrite is scheduled to run later, when the async - // operations complete, so skip it and continue. - if (writtenSynchronously) - Flush(connector, ref stats); - } - catch (Exception ex) - { - FailWrite(connector, ex); - } - } - - bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref MultiplexingStats stats) - { - // Note: this method *never* awaits on I/O - doing so would suspend all outgoing multiplexing commands - // for the entire pool. In the normal/fast case, writing the command is purely synchronous (serialize - // to buffer in memory), and the actual flush will occur at the level above. For cases where the - // command overflows the buffer, async I/O is done, and we schedule continuations separately - - // but the main thread continues to handle other commands on other connectors. - if (_autoPrepare) - { - // TODO: Need to log based on numPrepared like in non-multiplexing mode... - for (var i = 0; i < command.InternalBatchCommands.Count; i++) - command.InternalBatchCommands[i].TryAutoPrepare(connector); - } - - var written = connector.CommandsInFlightWriter!.TryWrite(command); - Debug.Assert(written, $"Failed to enqueue command to {connector.CommandsInFlightWriter}"); - - // Purposefully don't wait for I/O to complete - var task = command.Write(connector, async: true, flush: false); - stats.NumCommands++; - - switch (task.Status) - { - case TaskStatus.RanToCompletion: - return true; - - case TaskStatus.Faulted: - task.GetAwaiter().GetResult(); // Throw the exception - return true; - - case TaskStatus.WaitingForActivation: - case TaskStatus.Running: - { - // Asynchronous completion, which means the writing is flushing to network and there's actual I/O - // (i.e. a big command which overflowed our buffer). - // We don't (ever) await in the write loop, so remove the connector from the writable list (as it's - // still flushing) and schedule a continuation to continue taking care of this connector. 
- // The write loop continues to the next connector. - - // Create a copy of the statistics and purposefully box it via the closure. We need a separate - // copy of the stats for the async writing that will continue in parallel with this loop. - var clonedStats = stats.Clone(); - - // ReSharper disable once MethodSupportsCancellation - task.ContinueWith((t, o) => - { - var conn = (NpgsqlConnector)o!; - - if (t.IsFaulted) - { - FailWrite(conn, t.Exception!.UnwrapAggregate()); - return; - } - - // There's almost certainly more buffered outgoing data for the command, after the flush - // occured. Complete the write, which will flush again (and update statistics). - try - { - Flush(conn, ref clonedStats); - } - catch (Exception e) - { - FailWrite(conn, e); - } - }, connector); - - return false; - } - - default: - Debug.Fail("When writing command to connector, task is in invalid state " + task.Status); - throw new Exception("When writing command to connector, task is in invalid state " + task.Status); - } - } - - void Flush(NpgsqlConnector connector, ref MultiplexingStats stats) - { - var task = connector.Flush(async: true); - switch (task.Status) - { - case TaskStatus.RanToCompletion: - CompleteWrite(connector, ref stats); - return; - - case TaskStatus.Faulted: - task.GetAwaiter().GetResult(); // Throw the exception - return; - - case TaskStatus.WaitingForActivation: - case TaskStatus.Running: - { - // Asynchronous completion - the flush didn't complete immediately (e.g. TCP zero window). - - // Create a copy of the statistics and purposefully box it via the closure. We need a separate - // copy of the stats for the async writing that will continue in parallel with this loop. 
- var clonedStats = stats.Clone(); - - task.ContinueWith((t, o) => - { - var conn = (NpgsqlConnector)o!; - if (t.IsFaulted) - { - FailWrite(conn, t.Exception!.UnwrapAggregate()); - return; - } - - CompleteWrite(conn, ref clonedStats); - }, connector); - - return; - } - - default: - Debug.Fail("When flushing, task is in invalid state " + task.Status); - throw new Exception("When flushing, task is in invalid state " + task.Status); - } - } - - void FailWrite(NpgsqlConnector connector, Exception exception) - { - // Note that all commands already passed validation. This means any error here is either an unrecoverable network issue - // (in which case we're already broken), or some other issue while writing (e.g. invalid UTF8 characters in the SQL query) - - // unrecoverable in any case. - - // All commands enqueued in CommandsInFlightWriter will be drained by the reader and failed. - // Note that some of these commands where only written to the connector's buffer, but never - // actually sent - because of a later exception. - // In theory, we could track commands that were only enqueued and not sent, and retry those - // (on another connector), but that would add some book-keeping and complexity, and in any case - // if one connector was broken, chances are that all are (networking). - Debug.Assert(connector.IsBroken); - - LogMessages.ExceptionWhenWritingMultiplexedCommands(_commandLogger, connector.Id, exception); - } - - static void CompleteWrite(NpgsqlConnector connector, ref MultiplexingStats stats) - { - // All I/O has completed, mark this connector as safe for writing again. - // This will allow the connector to be returned to the pool by its read loop, and also to be selected - // for over-capacity write. 
- connector.FlagAsWritableForMultiplexing(); - - NpgsqlEventSource.Log.MultiplexingBatchSent(stats.NumCommands, stats.Stopwatch); - } - - // ReSharper disable once FunctionNeverReturns - } - - struct MultiplexingStats - { - internal Stopwatch Stopwatch; - internal int NumCommands; - - internal void Reset() - { - NumCommands = 0; - Stopwatch.Reset(); - } - - internal MultiplexingStats Clone() - { - var clone = new MultiplexingStats { Stopwatch = Stopwatch, NumCommands = NumCommands }; - Stopwatch = new Stopwatch(); - return clone; - } - } -} diff --git a/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs b/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs index 1fa188a91e..66b62dc883 100644 --- a/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs +++ b/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs @@ -1,4 +1,4 @@ -namespace Npgsql; +namespace Npgsql; /// /// A component which translates a CLR name (e.g. SomeClass) into a database name (e.g. some_class) diff --git a/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs index 30ff5b9725..4016825871 100644 --- a/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs @@ -1,11 +1,11 @@ -using System; +using System; namespace Npgsql.NameTranslation; /// /// A name translator which preserves CLR names (e.g. SomeClass) when mapping names to the database. /// -public class NpgsqlNullNameTranslator : INpgsqlNameTranslator +public sealed class NpgsqlNullNameTranslator : INpgsqlNameTranslator { /// /// Given a CLR type name (e.g class, struct, enum), translates its name to a database type name. 
diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index 963e1f4e2e..805c5d6b61 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -1,6 +1,6 @@ -using System; +using System; +using System.Collections.Generic; using System.Globalization; -using System.Linq; using System.Text; namespace Npgsql.NameTranslation; @@ -9,13 +9,22 @@ namespace Npgsql.NameTranslation; /// A name translator which converts standard CLR names (e.g. SomeClass) to snake-case database /// names (some_class) /// -public class NpgsqlSnakeCaseNameTranslator : INpgsqlNameTranslator +public sealed class NpgsqlSnakeCaseNameTranslator : INpgsqlNameTranslator { + internal static NpgsqlSnakeCaseNameTranslator Instance { get; } = new(); + + readonly CultureInfo _culture; + /// /// Creates a new . /// - public NpgsqlSnakeCaseNameTranslator() - : this(false) { } + /// + /// An object that supplies culture-specific casing rules. + /// This will be used when converting names to lower case. + /// If then will be used. + /// + public NpgsqlSnakeCaseNameTranslator(CultureInfo? culture = null) + : this(false, culture) { } /// /// Creates a new . @@ -23,8 +32,16 @@ public NpgsqlSnakeCaseNameTranslator() /// /// Uses the legacy naming convention if , otherwise it uses the new naming convention. /// - public NpgsqlSnakeCaseNameTranslator(bool legacyMode) - => LegacyMode = legacyMode; + /// + /// An object that supplies culture-specific casing rules. + /// This will be used when converting names to lower case. + /// If then will be used. + /// + public NpgsqlSnakeCaseNameTranslator(bool legacyMode, CultureInfo? culture = null) + { + LegacyMode = legacyMode; + _culture = culture ?? 
CultureInfo.InvariantCulture; + } bool LegacyMode { get; } @@ -38,19 +55,32 @@ public NpgsqlSnakeCaseNameTranslator(bool legacyMode) /// public string TranslateMemberName(string clrName) { - if (clrName == null) - throw new ArgumentNullException(nameof(clrName)); + ArgumentNullException.ThrowIfNull(clrName); return LegacyMode - ? string.Concat(clrName.Select((c, i) => i > 0 && char.IsUpper(c) ? "_" + c.ToString() : c.ToString())).ToLower() - : ConvertToSnakeCase(clrName); + ? string.Concat(LegacyModeMap(clrName)).ToLower(_culture) + : ConvertToSnakeCase(clrName, _culture); + + IEnumerable LegacyModeMap(string clrName) + { + for (var i = 0; i < clrName.Length; i++) + { + var c = clrName[i]; + yield return i > 0 && char.IsUpper(c) ? "_" + c.ToString() : c.ToString(); + } + } } /// /// Converts a string to its snake_case equivalent. /// /// The value to convert. - public static string ConvertToSnakeCase(string name) + /// + /// An object that supplies culture-specific casing rules. + /// This will be used when converting names to lower case. + /// If then will be used. + /// + public static string ConvertToSnakeCase(string name, CultureInfo? culture = null) { if (string.IsNullOrEmpty(name)) return name; @@ -84,7 +114,7 @@ public static string ConvertToSnakeCase(string name) builder.Append('_'); } - currentChar = char.ToLower(currentChar); + currentChar = char.ToLower(currentChar, culture ?? 
CultureInfo.InvariantCulture); break; case UnicodeCategory.LowercaseLetter: @@ -105,4 +135,4 @@ public static string ConvertToSnakeCase(string name) return builder.ToString(); } -} \ No newline at end of file +} diff --git a/src/Npgsql/NoSynchronizationContextScope.cs b/src/Npgsql/NoSynchronizationContextScope.cs deleted file mode 100644 index d34d884856..0000000000 --- a/src/Npgsql/NoSynchronizationContextScope.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System; -using System.Threading; - -namespace Npgsql; - -/// -/// This mechanism is used to temporarily set the current synchronization context to null while -/// executing Npgsql code, making all await continuations execute on the thread pool. This replaces -/// the need to place ConfigureAwait(false) everywhere, and should be used in all surface async methods, -/// without exception. -/// -/// Warning: do not use this directly in async methods, use it in sync wrappers of async methods -/// (see https://github.com/npgsql/npgsql/issues/1593) -/// -/// -/// https://stackoverflow.com/a/28307965/640325 -/// -static class NoSynchronizationContextScope -{ - internal static Disposable Enter() => new(SynchronizationContext.Current); - - internal struct Disposable : IDisposable - { - readonly SynchronizationContext? _synchronizationContext; - - internal Disposable(SynchronizationContext? 
synchronizationContext) - { - if (synchronizationContext != null) - SynchronizationContext.SetSynchronizationContext(null); - - _synchronizationContext = synchronizationContext; - } - - public void Dispose() - => SynchronizationContext.SetSynchronizationContext(_synchronizationContext); - } -} \ No newline at end of file diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 368e04ca57..5ebaefcdf5 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -1,43 +1,26 @@ - + - Shay Rojansky;Nikita Kazmin;Brar Piening;Yoh Deadfall;;Austin Drenski;Emil Lenngren;Francisco Figueiredo Jr.;Kenji Uno + Shay Rojansky;Nikita Kazmin;Brar Piening;Nino Floris;Yoh Deadfall;;Austin Drenski;Emil Lenngren;Francisco Figueiredo Jr.;Kenji Uno Npgsql is the open source .NET data provider for PostgreSQL. npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - - netstandard2.0;netstandard2.1;netcoreapp3.1;net5.0;net6.0;net7.0 - net7.0 + net10.0 + $(NoWarn);CA2017 + $(NoWarn);NPG9001 + $(NoWarn);NPG9002 + $(NoWarn);NPG9003 - + - - - - - - - - - - - - - - - - - - @@ -56,5 +39,4 @@ NpgsqlStrings.resx - diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 002cf4a638..f2a005f02d 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -1,5 +1,6 @@ -using Npgsql.Internal; +using Npgsql.Internal; using System; +using System.Data; using System.Diagnostics; using System.Net; using System.Net.Sockets; @@ -7,82 +8,181 @@ namespace Npgsql; +// Semantic conventions for database client spans: https://opentelemetry.io/docs/specs/semconv/database/database-spans/ +// Semantic conventions for PostgreSQL client operations: https://opentelemetry.io/docs/specs/semconv/database/postgresql/ static class NpgsqlActivitySource { - static readonly ActivitySource Source; + static readonly ActivitySource Source = new("Npgsql", GetLibraryVersion()); - static NpgsqlActivitySource() + internal static bool 
IsEnabled => Source.HasListeners(); + + internal static Activity? CommandStart(string commandText, CommandType commandType, bool? prepared, string? spanName) { - var assembly = typeof(NpgsqlActivitySource).Assembly; - var version = assembly.GetCustomAttribute()?.Version ?? "0.0.0"; - Source = new("Npgsql", version); - } + string? operationName = null; - internal static bool IsEnabled => Source.HasListeners(); + switch (commandType) + { + case CommandType.StoredProcedure: + // We follow the {db.operation.name} {target} pattern of the spec, with the operation being SELECT/CALL and + // the target being the stored procedure name. + operationName = NpgsqlCommand.EnableStoredProcedureCompatMode ? "SELECT" : "CALL"; + spanName ??= $"{operationName} {commandText}"; + break; + case CommandType.TableDirect: + // We follow the {db.operation.name} {target} pattern of the spec, with the operation being SELECT and + // the target being the table (collection) name. + operationName = "SELECT"; + spanName ??= $"{operationName} {commandText}"; + break; + case CommandType.Text: + // We don't have db.query.summary, db.operation.name or target (without parsing SQL), + // so we fall back to db.system.name as per the specs. + spanName ??= "postgresql"; + break; + default: + throw new ArgumentOutOfRangeException(nameof(commandType), commandType, null); + } + + var activity = Source.StartActivity(spanName, ActivityKind.Client); + if (activity is not { IsAllDataRequested: true }) + return activity; - internal static Activity? 
CommandStart(NpgsqlConnector connector, string sql) + activity.SetTag("db.query.text", commandText); + + if (prepared is true) + activity.SetTag("db.npgsql.prepared", true); + + switch (commandType) + { + case CommandType.StoredProcedure: + Debug.Assert(operationName is not null); + activity.SetTag("db.operation.name", operationName); + activity.SetTag("db.stored_procedure.name", commandText); + break; + case CommandType.TableDirect: + Debug.Assert(operationName is not null); + activity.SetTag("db.operation.name", operationName); + activity.SetTag("db.collection.name", commandText); + break; + } + + return activity; + } + + internal static Activity? PhysicalConnectionOpen(NpgsqlConnector connector) { - var settings = connector.Settings; - var activity = Source.StartActivity(settings.Database!, ActivityKind.Client); + if (!connector.DataSource.Configuration.TracingOptions.EnablePhysicalOpenTracing) + return null; + + // Note that physical connection open is not part of the OpenTelemetry spec. + // We emit it if enabled, following the general name/tags guidelines. + var dbName = connector.Settings.Database ?? connector.InferredUserName; + var activity = Source.StartActivity("CONNECT " + dbName, ActivityKind.Client); if (activity is not { IsAllDataRequested: true }) return activity; - activity.SetTag("db.system", "postgresql"); - activity.SetTag("db.connection_string", connector.UserFacingConnectionString); - activity.SetTag("db.user", settings.Username); - activity.SetTag("db.name", settings.Database); - activity.SetTag("db.statement", sql); - activity.SetTag("db.connection_id", connector.Id); + // We set these basic tags on the activity so that they're populated even when the physical open fails. 
+ activity.SetTag("db.system.name", "postgresql"); + activity.SetTag("db.npgsql.data_source", connector.DataSource.Name); + + return activity; + } + + internal static void Enrich(Activity activity, NpgsqlConnector connector) + { + if (!activity.IsAllDataRequested) + return; + + activity.SetTag("db.system.name", "postgresql"); + + // TODO: For now, we only set the database name, without adding the first schema in the search_path + // as per the PG tracing specs (https://opentelemetry.io/docs/specs/semconv/database/postgresql/). + // See #6336 + activity.SetTag("db.namespace", connector.Settings.Database ?? connector.InferredUserName); var endPoint = connector.ConnectedEndPoint; Debug.Assert(endPoint is not null); + activity.SetTag("server.address", connector.Host); switch (endPoint) { case IPEndPoint ipEndPoint: - activity.SetTag("net.transport", "ip_tcp"); - activity.SetTag("net.peer.ip", ipEndPoint.Address.ToString()); if (ipEndPoint.Port != 5432) - activity.SetTag("net.peer.port", ipEndPoint.Port); - activity.SetTag("net.peer.name", settings.Host); + activity.SetTag("server.port", ipEndPoint.Port); break; case UnixDomainSocketEndPoint: - activity.SetTag("net.transport", "unix"); - activity.SetTag("net.peer.name", settings.Host); break; default: - throw new ArgumentOutOfRangeException("Invalid endpoint type: " + endPoint.GetType()); + throw new UnreachableException("Invalid endpoint type: " + endPoint.GetType()); } - return activity; + // Npgsql-specific tags + activity.SetTag("db.npgsql.data_source", connector.DataSource.Name); + activity.SetTag("db.npgsql.connection_id", connector.Id); } - internal static void ReceivedFirstResponse(Activity activity) + internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptions tracingOptions) { + if (!activity.IsAllDataRequested || !tracingOptions.EnableFirstResponseEvent) + return; + var activityEvent = new ActivityEvent("received-first-response"); activity.AddEvent(activityEvent); } - internal static 
void CommandStop(Activity activity) + internal static void SetException(Activity activity, Exception exception, bool escaped = true) { - activity.SetTag("otel.status_code", "OK"); + activity.AddException(exception); + + if (exception is PostgresException { SqlState: var sqlState }) + { + activity.SetTag("db.response.status_code", sqlState); + + // error.type SHOULD match the db.response.status_code returned by the database or the client library, or the canonical name of exception that occurred. + // Since we don't have a table to map the error code to a textual description, the SQL state is the best we can do. + activity.SetTag("error.type", sqlState); + } + else + { + if (exception is NpgsqlException { InnerException: Exception innerException }) + exception = innerException; + + activity.SetTag("error.type", exception.GetType().FullName); + } + + var statusDescription = exception is PostgresException pgEx ? pgEx.SqlState : exception.Message; + activity.SetStatus(ActivityStatusCode.Error, statusDescription); activity.Dispose(); } - internal static void SetException(Activity activity, Exception ex, bool escaped = true) + internal static Activity? CopyStart(string command, NpgsqlConnector connector, string? spanName, string operation) { - var tags = new ActivityTagsCollection - { - { "exception.type", ex.GetType().FullName }, - { "exception.message", ex.Message }, - { "exception.stacktrace", ex.ToString() }, - { "exception.escaped", escaped } - }; - var activityEvent = new ActivityEvent("exception", tags: tags); - activity.AddEvent(activityEvent); - activity.SetTag("otel.status_code", "ERROR"); - activity.SetTag("otel.status_description", ex is PostgresException pgEx ? pgEx.SqlState : ex.Message); + var activity = Source.StartActivity(spanName ?? 
operation, ActivityKind.Client); + if (activity is not { IsAllDataRequested: true }) + return activity; + activity.SetTag("db.query.text", command); + activity.SetTag("db.operation.name", operation); + Enrich(activity, connector); + return activity; + } + + internal static void SetOperation(Activity activity, string operation) + { + if (!activity.IsAllDataRequested) + return; + activity.SetTag("db.operation.name", operation); + } + + internal static void CopyStop(Activity activity, ulong? rows = null) + { + if (rows.HasValue) + activity.SetTag("db.npgsql.rows", rows.Value); activity.Dispose(); } -} \ No newline at end of file + + static string GetLibraryVersion() + => typeof(NpgsqlDataSource).Assembly + .GetCustomAttribute()? + .InformationalVersion ?? "UNKNOWN"; +} diff --git a/src/Npgsql/NpgsqlBatch.cs b/src/Npgsql/NpgsqlBatch.cs index 0b86bb3164..e692199e2b 100644 --- a/src/Npgsql/NpgsqlBatch.cs +++ b/src/Npgsql/NpgsqlBatch.cs @@ -1,3 +1,4 @@ +using System; using System.Data; using System.Data.Common; using System.Threading; @@ -98,7 +99,8 @@ internal bool AllResultTypesAreUnknown /// The in which the executes. public NpgsqlBatch(NpgsqlConnection? connection = null, NpgsqlTransaction? transaction = null) { - Command = new(DefaultBatchCommandsSize); + GC.SuppressFinalize(this); + Command = new(this, DefaultBatchCommandsSize); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); Connection = connection; @@ -107,18 +109,23 @@ public NpgsqlBatch(NpgsqlConnection? connection = null, NpgsqlTransaction? 
trans internal NpgsqlBatch(NpgsqlConnector connector) { - Command = new(connector, DefaultBatchCommandsSize); + GC.SuppressFinalize(this); + Command = new(this, connector, DefaultBatchCommandsSize); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); } - private protected NpgsqlBatch(NpgsqlDataSourceCommand command) + private protected NpgsqlBatch(Func commandFactory, NpgsqlConnection connection) { - Command = command; + GC.SuppressFinalize(this); + Command = commandFactory(connection, this); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); } /// - protected override DbBatchCommand CreateDbBatchCommand() + protected override DbBatchCommand CreateDbBatchCommand() => CreateBatchCommand(); + + /// + public new NpgsqlBatchCommand CreateBatchCommand() => new NpgsqlBatchCommand(); /// @@ -133,7 +140,7 @@ protected override DbDataReader ExecuteDbDataReader(CommandBehavior behavior) protected override async Task ExecuteDbDataReaderAsync( CommandBehavior behavior, CancellationToken cancellationToken) - => await ExecuteReaderAsync(behavior, cancellationToken); + => await ExecuteReaderAsync(behavior, cancellationToken).ConfigureAwait(false); /// public new Task ExecuteReaderAsync(CancellationToken cancellationToken = default) @@ -171,4 +178,26 @@ public override Task PrepareAsync(CancellationToken cancellationToken = default) /// public override void Cancel() => Command.Cancel(); -} \ No newline at end of file + + /// + public override void Dispose() + { + Command.ResetTransaction(); + if (Command.IsCacheable && Connection is not null && Connection.CachedBatch is null) + { + BatchCommands.Clear(); + Command.Reset(); + Connection.CachedBatch = this; + return; + } + + Command.IsCacheable = false; + } + + internal static NpgsqlBatch CreateCachedBatch(NpgsqlConnection connection) + { + var batch = new NpgsqlBatch(connection); + batch.Command.IsCacheable = true; + return batch; + } +} diff --git 
a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 78aedc1f7e..9534a54b17 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -1,10 +1,12 @@ -using System; +using System; +using System.Buffers; using System.Collections.Generic; using System.Data; using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; +using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; @@ -13,6 +15,8 @@ namespace Npgsql; /// public sealed class NpgsqlBatchCommand : DbBatchCommand { + internal static readonly List EmptyParameters = []; + string _commandText; /// @@ -20,7 +24,13 @@ public sealed class NpgsqlBatchCommand : DbBatchCommand public override string CommandText { get => _commandText; - set => _commandText = value ?? string.Empty; + set + { + _commandText = value ?? string.Empty; + + ResetPreparation(); + // TODO: Technically should do this also if the parameter list (or type) changes + } } /// @@ -29,8 +39,18 @@ public override string CommandText /// protected override DbParameterCollection DbParameterCollection => Parameters; + internal NpgsqlParameterCollection? _parameters; /// - public new NpgsqlParameterCollection Parameters { get; } = new(); + public new NpgsqlParameterCollection Parameters => _parameters ??= []; + + internal bool HasOutputParameters => _parameters?.HasOutputParameters == true; + + /// + public override NpgsqlParameter CreateParameter() => new(); + + /// + public override bool CanCreateParameter => true; + /// /// Appends an error barrier after this batch command. 
Defaults to the value of on the @@ -115,10 +135,14 @@ public override int RecordsAffected /// internal List PositionalParameters { - get => _inputParameters ??= _ownedInputParameters ??= new(); + get => _inputParameters ??= _ownedInputParameters ??= []; set => _inputParameters = value; } + internal bool HasParameters => _inputParameters?.Count > 0 || _ownedInputParameters?.Count > 0; + + internal List CurrentParametersReadOnly => HasParameters ? PositionalParameters : EmptyParameters; + List? _ownedInputParameters; List? _inputParameters; @@ -145,20 +169,22 @@ internal RowDescriptionMessage? Description /// internal PreparedStatement? PreparedStatement { - get => _preparedStatement != null && _preparedStatement.State == PreparedState.Unprepared + get => _preparedStatement is { State: PreparedState.Unprepared } ? _preparedStatement = null : _preparedStatement; - set => _preparedStatement = value; + private set => _preparedStatement = value; } PreparedStatement? _preparedStatement; + internal NpgsqlConnector? ConnectorPreparedOn { get; set; } + internal bool IsPreparing; /// - /// Holds the server-side (prepared) statement name. Empty string for non-prepared statements. + /// Holds the server-side (prepared) ASCII statement name. Empty string for non-prepared statements. /// - internal string StatementName => PreparedStatement?.Name ?? ""; + internal byte[] StatementName => PreparedStatement?.Name ?? []; /// /// Whether this statement has already been prepared (including automatic preparation). 
@@ -248,6 +274,118 @@ internal void ApplyCommandComplete(CommandCompleteMessage msg) OID = msg.OID; } + internal void ResetPreparation() + { + ConnectorPreparedOn = null; + PreparedStatement = null; + } + + internal void PopulateOutputParameters(NpgsqlDataReader reader, ILogger logger) + { + Debug.Assert(_parameters is not null); + var parameters = _parameters; + var fieldCount = reader.FieldCount; + switch (parameters.PlaceholderType) + { + case PlaceholderType.Mixed: + case PlaceholderType.Named: + { + // In the case of named and mixed parameters we first try to populate all parameters with a named column match. + // For backwards compat we allow populating named parameters as long as they haven't been filled yet. + // So for every column that we couldn't match by name we fill the first output direction parameter that wasn't filled previously. + // This means a row like {"a" => 1, "some_field" => 2} will populate the following output db params {"a" => 1, "b" => 2}. + // And a row like {"some_field" => 1, "a" => 2} will populate them as follows {"a" => 2, "b" => 1}. + + var parameterIndices = new ArraySegment(ArrayPool.Shared.Rent(fieldCount), 0, fieldCount); + var secondPassOrdinal = -1; + for (var ordinal = 0; ordinal < fieldCount; ordinal++) + { + var name = reader.GetName(ordinal); + var i = parameters.IndexOf(name); + if (i is not -1 && parameters[i] is { IsOutputDirection: true } parameter) + { + SetValue(reader, logger, parameter, ordinal, i); + parameterIndices[ordinal] = i; + } + else + { + parameterIndices[ordinal] = -1; + if (secondPassOrdinal is -1) + secondPassOrdinal = ordinal; + } + } + + if (secondPassOrdinal is -1) + { + ArrayPool.Shared.Return(parameterIndices.Array!); + break; + } + + // This set will also contain -1, but that's not a valid index so we can ignore it is included. 
+ var matchedParameters = new HashSet(parameterIndices); + var parameterList = parameters.InternalList; + for (var i = 0; i < parameterList.Count; i++) + { + // Find an output parameter that wasn't matched by name. + if (parameterList[i] is not { IsOutputDirection: true } parameter || matchedParameters.Contains(i)) + continue; + + SetValue(reader, logger, parameter, secondPassOrdinal, i); + + // And find the next unhandled ordinal. + secondPassOrdinal = NextSecondPassOrdinal(parameterIndices, secondPassOrdinal); + if (secondPassOrdinal is -1) + break; + } + + ArrayPool.Shared.Return(parameterIndices.Array!); + break; + + static int NextSecondPassOrdinal(ArraySegment indices, int offset) + { + for (var i = offset + 1; i < indices.Count; i++) + { + if (indices[i] is -1) + return i; + } + + return -1; + } + } + case PlaceholderType.Positional: + { + var parameterList = parameters.InternalList; + var ordinal = 0; + for (var i = 0; i < parameterList.Count; i++) + { + if (parameterList[i] is not { IsOutputDirection: true } parameter) + continue; + + SetValue(reader, logger, parameter, ordinal, i); + + ordinal++; + if (ordinal == fieldCount) + break; + } + break; + } + } + + static void SetValue(NpgsqlDataReader reader, ILogger logger, NpgsqlParameter p, int ordinal, int index) + { + try + { + p.SetOutputValue(reader, ordinal); + } + catch (Exception ex) + { + logger.LogDebug(ex, "Failed to set value on output parameter instance '{ParameterNameOrIndex}' for output parameter {OutputName}", + p.ParameterName is NpgsqlParameter.PositionalName ? index : p.ParameterName, reader.GetName(ordinal)); + throw; + } + } + } + /// /// Returns the . 
/// diff --git a/src/Npgsql/NpgsqlBatchCommandCollection.cs b/src/Npgsql/NpgsqlBatchCommandCollection.cs index 7a345f609b..a79afa359b 100644 --- a/src/Npgsql/NpgsqlBatchCommandCollection.cs +++ b/src/Npgsql/NpgsqlBatchCommandCollection.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; namespace Npgsql; @@ -81,7 +82,7 @@ NpgsqlBatchCommand IList.this[int index] set => _list[index] = value; } - /// + /// public new NpgsqlBatchCommand this[int index] { get => _list[index]; @@ -97,8 +98,16 @@ protected override void SetBatchCommand(int index, DbBatchCommand batchCommand) => _list[index] = Cast(batchCommand); static NpgsqlBatchCommand Cast(DbBatchCommand? value) - => value is NpgsqlBatchCommand c - ? c - : throw new InvalidCastException( - $"The value \"{value}\" is not of type \"{nameof(NpgsqlBatchCommand)}\" and cannot be used in this batch command collection."); -} \ No newline at end of file + { + var castedValue = value as NpgsqlBatchCommand; + if (castedValue is null) + ThrowInvalidCastException(value); + + return castedValue; + } + + [DoesNotReturn] + static void ThrowInvalidCastException(DbBatchCommand? 
value) => + throw new InvalidCastException( + $"The value \"{value}\" is not of type \"{nameof(NpgsqlBatchCommand)}\" and cannot be used in this batch command collection."); +} diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 5415411062..bccb34507e 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -1,14 +1,13 @@ -using System; +using System; using System.Diagnostics; -using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.TypeMapping; +using Npgsql.Internal.Postgres; using NpgsqlTypes; +using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; namespace Npgsql; @@ -19,23 +18,27 @@ namespace Npgsql; /// public sealed class NpgsqlBinaryExporter : ICancelable { + const int BeforeRow = -2; + const int BeforeColumn = -1; + #region Fields and Properties NpgsqlConnector _connector; NpgsqlReadBuffer _buf; - TypeMapper _typeMapper; - bool _isConsumed, _isDisposed; - int _leftToReadInDataMsg, _columnLen; + ExporterState _state = ExporterState.Uninitialized; + long _endOfMessagePos; short _column; ulong _rowsExported; + PgReader PgReader => _buf.PgReader; + /// /// The number of columns, as returned from the backend in the CopyInResponse. /// - internal int NumColumns { get; private set; } + int NumColumns { get; set; } - NpgsqlTypeHandler?[] _typeHandlerCache; + ReadConversionContext[] _conversionContextCache; readonly ILogger _copyLogger; @@ -44,14 +47,11 @@ public sealed class NpgsqlBinaryExporter : ICancelable /// public TimeSpan Timeout { - set - { - _buf.Timeout = value; - // While calling Complete(), we're using the connector, which overwrites the buffer's timeout with it's own - _connector.UserTimeout = (int)value.TotalMilliseconds; - } + set => _buf.Timeout = value > TimeSpan.Zero ? 
value : InfiniteTimeout.InfiniteTimeSpan; } + Activity? _activity; + #endregion #region Construction / Initialization @@ -60,63 +60,75 @@ internal NpgsqlBinaryExporter(NpgsqlConnector connector) { _connector = connector; _buf = connector.ReadBuffer; - _typeMapper = connector.TypeMapper; - _columnLen = int.MinValue; // Mark that the (first) column length hasn't been read yet - _column = -1; - _typeHandlerCache = null!; + _column = BeforeRow; + _conversionContextCache = null!; _copyLogger = connector.LoggingConfiguration.CopyLogger; } internal async Task Init(string copyToCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyToCommand, async, cancellationToken); - await _connector.Flush(async, cancellationToken); - - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + Debug.Assert(_activity is null); + _activity = _connector.TraceCopyStart(copyToCommand, "COPY TO"); - CopyOutResponseMessage copyOutResponse; - var msg = await _connector.ReadMessage(async); - switch (msg.Code) + try { - case BackendMessageCode.CopyOutResponse: - copyOutResponse = (CopyOutResponseMessage) msg; - if (!copyOutResponse.IsBinary) + await _connector.WriteQuery(copyToCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + CopyOutResponseMessage copyOutResponse; + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) { - throw _connector.Break( - new ArgumentException("copyToCommand triggered a text transfer, only binary is allowed", - nameof(copyToCommand))); + case BackendMessageCode.CopyOutResponse: + copyOutResponse = (CopyOutResponseMessage)msg; + if (!copyOutResponse.IsBinary) + { + throw _connector.Break( + new 
ArgumentException("copyToCommand triggered a text transfer, only binary is allowed", + nameof(copyToCommand))); + } + break; + case BackendMessageCode.CommandComplete: + throw new InvalidOperationException( + "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + + "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + + "Note that your data has been successfully imported/exported."); + default: + throw _connector.UnexpectedMessageReceived(msg.Code); } - break; - case BackendMessageCode.CommandComplete: - throw new InvalidOperationException( - "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + - "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + - "Note that your data has been successfully imported/exported."); - default: - throw _connector.UnexpectedMessageReceived(msg.Code); - } - NumColumns = copyOutResponse.NumColumns; - _typeHandlerCache = new NpgsqlTypeHandler[NumColumns]; - _rowsExported = 0; - await ReadHeader(async); + _state = ExporterState.Ready; + NumColumns = copyOutResponse.NumColumns; + _conversionContextCache = new ReadConversionContext[NumColumns]; + _rowsExported = 0; + _endOfMessagePos = _buf.CumulativeReadPosition; + await ReadHeader(async).ConfigureAwait(false); + } + catch (Exception e) + { + TraceSetException(e); + throw; + } } async Task ReadHeader(bool async) { - _leftToReadInDataMsg = Expect(await _connector.ReadMessage(async), _connector).Length; + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + _endOfMessagePos = _buf.CumulativeReadPosition + Expect(msg, _connector).Length; var headerLen = NpgsqlRawCopyStream.BinarySignature.Length + 4 + 4; - await _buf.Ensure(headerLen, async); + await _buf.Ensure(headerLen, async).ConfigureAwait(false); - if (NpgsqlRawCopyStream.BinarySignature.Any(t => 
_buf.ReadByte() != t)) - throw new NpgsqlException("Invalid COPY binary signature at beginning!"); + foreach (var t in NpgsqlRawCopyStream.BinarySignature) + if (_buf.ReadByte() != t) + throw new NpgsqlException("Invalid COPY binary signature at beginning!"); var flags = _buf.ReadInt32(); if (flags != 0) throw new NotSupportedException("Unsupported flags in COPY operation (OID inclusion?)"); _buf.ReadInt32(); // Header extensions, currently unused - _leftToReadInDataMsg -= headerLen; } #endregion @@ -139,46 +151,53 @@ async Task ReadHeader(bool async) /// The number of columns in the row. -1 if there are no further rows. /// Note: This will currently be the same value for all rows, but this may change in the future. /// - public ValueTask StartRowAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return StartRow(true, cancellationToken); - } + public ValueTask StartRowAsync(CancellationToken cancellationToken = default) => StartRow(true, cancellationToken); async ValueTask StartRow(bool async, CancellationToken cancellationToken = default) { - CheckDisposed(); - if (_isConsumed) + ThrowIfDisposed(); + if (_state == ExporterState.Consumed) return -1; using var registration = _connector.StartNestedCancellableOperation(cancellationToken); + // Consume and advance any active column. + if (_column >= 0) + { + if (async) + await PgReader.CommitAsync().ConfigureAwait(false); + else + PgReader.Commit(); + _column++; + } + // The very first row (i.e. _column == -1) is included in the header's CopyData message. // Otherwise we need to read in a new CopyData row (the docs specify that there's a CopyData // message per row). 
if (_column == NumColumns) - _leftToReadInDataMsg = Expect(await _connector.ReadMessage(async), _connector).Length; - else if (_column != -1) - throw new InvalidOperationException("Already in the middle of a row"); + { + var msg = Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + _endOfMessagePos = _buf.CumulativeReadPosition + msg.Length; + } + else if (_column != BeforeRow) + ThrowHelper.ThrowInvalidOperationException("Already in the middle of a row"); - await _buf.Ensure(2, async); - _leftToReadInDataMsg -= 2; + await _buf.Ensure(2, async).ConfigureAwait(false); var numColumns = _buf.ReadInt16(); if (numColumns == -1) { - Debug.Assert(_leftToReadInDataMsg == 0); - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); - _column = -1; - _isConsumed = true; + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + _column = BeforeRow; + _state = ExporterState.Consumed; return -1; } Debug.Assert(numColumns == NumColumns); - _column = 0; + _column = BeforeColumn; _rowsExported++; return NumColumns; } @@ -193,7 +212,8 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = /// specify the type. /// /// The value of the column - public T Read() => Read(false).GetAwaiter().GetResult(); + public T Read() + => Read(null); /// /// Reads the current column, returns its value and moves ahead to the next column. 
@@ -206,25 +226,7 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = /// /// The value of the column public ValueTask ReadAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Read(true, cancellationToken); - } - - ValueTask Read(bool async, CancellationToken cancellationToken = default) - { - CheckDisposed(); - - if (_column == -1 || _column == NumColumns) - throw new InvalidOperationException("Not reading a row"); - - var type = typeof(T); - var handler = _typeHandlerCache[_column]; - if (handler == null) - handler = _typeHandlerCache[_column] = _typeMapper.ResolveByClrType(type); - - return DoRead(handler, async, cancellationToken); - } + => ReadAsync(null, cancellationToken); /// /// Reads the current column, returns its value according to and @@ -239,7 +241,8 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) /// /// The .NET type of the column to be read. /// The value of the column - public T Read(NpgsqlDbType type) => Read(type, false).GetAwaiter().GetResult(); + public T Read(NpgsqlDbType type) + => Read((NpgsqlDbType?)type); /// /// Reads the current column, returns its value according to and @@ -258,60 +261,101 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) /// The .NET type of the column to be read. /// The value of the column public ValueTask ReadAsync(NpgsqlDbType type, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Read(type, true, cancellationToken); - } + => ReadAsync((NpgsqlDbType?)type, cancellationToken); - ValueTask Read(NpgsqlDbType type, bool async, CancellationToken cancellationToken = default) + T Read(NpgsqlDbType? 
type) { - CheckDisposed(); - if (_column == -1 || _column == NumColumns) - throw new InvalidOperationException("Not reading a row"); + ThrowIfNotOnRow(); + + if (!IsInitializedAndAtStart) + MoveNextColumn(resumableOp: false); - var handler = _typeHandlerCache[_column]; - if (handler == null) - handler = _typeHandlerCache[_column] = _typeMapper.ResolveByNpgsqlDbType(type); + var reader = PgReader; + try + { + if (reader.FieldIsDbNull) + return DbNullOrThrow(); - return DoRead(handler, async, cancellationToken); + var typeInfo = GetConversionContext(typeof(T), type, out var bindingContext); + return typeInfo.ReadFieldValue(reader, bindingContext); + } + finally + { + // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). + // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. + // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. + reader.Commit(); + } } - async ValueTask DoRead(NpgsqlTypeHandler handler, bool async, CancellationToken cancellationToken = default) + async ValueTask ReadAsync(NpgsqlDbType? 
type, CancellationToken cancellationToken) { + ThrowIfNotOnRow(); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + if (!IsInitializedAndAtStart) + await MoveNextColumnAsync(resumableOp: false).ConfigureAwait(false); + + var reader = PgReader; try { - using var registration = _connector.StartNestedCancellableOperation(cancellationToken); + if (reader.FieldIsDbNull) + return DbNullOrThrow(); - await ReadColumnLenIfNeeded(async); + var typeInfo = GetConversionContext(typeof(T), type, out var bindingContext); + return await typeInfo.ReadFieldValueAsync(reader, bindingContext, cancellationToken).ConfigureAwait(false); + } + finally + { + // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). + // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. + // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. + await reader.CommitAsync().ConfigureAwait(false); + } + } - if (_columnLen == -1) + static T DbNullOrThrow() + { + // When T is a Nullable, we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; + throw new InvalidCastException("Column is null"); + } + + PgConcreteTypeInfo GetConversionContext(Type type, NpgsqlDbType? npgsqlDbType, out PgFieldBinding binding) + { + ref var contextRef = ref _conversionContextCache[_column]; + var context = contextRef.IsDefault ? contextRef = GetInfoAndBind(type, npgsqlDbType) : contextRef; + binding = context.Binding; + return context.TypeInfo; + + ReadConversionContext GetInfoAndBind(Type type, NpgsqlDbType? npgsqlDbType) + { + var options = _connector.SerializerOptions; + PgTypeId? 
pgTypeId = null; + if (npgsqlDbType.HasValue) { -#pragma warning disable CS8653 // A default expression introduces a null value when 'T' is a non-nullable reference type. - // When T is a Nullable, we support returning null - if (NullableHandler.Exists) - return default!; -#pragma warning restore CS8653 - throw new InvalidCastException("Column is null"); + pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name + ? options.GetCanonicalTypeId(name) + // Handle plugin types via lookup. + : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); } + var typeInfo = options.GetTypeInfoInternal(type, pgTypeId) + ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); - // If we know the entire column is already in memory, use the code path without async - var result = NullableHandler.Exists - ? _columnLen <= _buf.ReadBytesLeft - ? NullableHandler.Read(handler, _buf, _columnLen) - : await NullableHandler.ReadAsync(handler, _buf, _columnLen, async) - : _columnLen <= _buf.ReadBytesLeft - ? handler.Read(_buf, _columnLen) - : await handler.Read(_buf, _columnLen, async); - - _leftToReadInDataMsg -= _columnLen; - _columnLen = int.MinValue; // Mark that the (next) column length hasn't been read yet - _column++; - return result; - } - catch (Exception e) - { - _connector.Break(e); - throw; + // Binary export has no type info so we only do caller-directed interpretation of data. + var concreteTypeInfo = typeInfo.MakeConcreteForField( + Field.CreateUnspecified(typeInfo.PgTypeId ?? 
((PgProviderTypeInfo)typeInfo).GetDefault(null).PgTypeId)); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, options, concreteTypeInfo.PgTypeId, resolved: true); + return new(concreteTypeInfo, concreteTypeInfo.BindField(DataFormat.Binary)); + + PgTypeId GetRepresentationalOrDefault(string dataTypeName) + { + var type = options.DatabaseInfo.GetPostgresType(dataTypeName); + return options.ToCanonicalTypeId(type.GetRepresentationalType()); + } } } @@ -322,57 +366,83 @@ public bool IsNull { get { - ReadColumnLenIfNeeded(false).GetAwaiter().GetResult(); - return _columnLen == -1; + ThrowIfNotOnRow(); + if (!IsInitializedAndAtStart) + MoveNextColumn(resumableOp: true); + + return PgReader.FieldIsDbNull; } } /// /// Skips the current column without interpreting its value. /// - public void Skip() => Skip(false).GetAwaiter().GetResult(); + public void Skip() + { + ThrowIfNotOnRow(); + + if (!IsInitializedAndAtStart) + MoveNextColumn(resumableOp: false); + + PgReader.Commit(); + } /// /// Skips the current column without interpreting its value. 
/// - public Task SkipAsync(CancellationToken cancellationToken = default) + public async Task SkipAsync(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return Skip(true, cancellationToken); - } - - async Task Skip(bool async, CancellationToken cancellationToken = default) - { - CheckDisposed(); + ThrowIfNotOnRow(); using var registration = _connector.StartNestedCancellableOperation(cancellationToken); - await ReadColumnLenIfNeeded(async); - if (_columnLen != -1) - await _buf.Skip(_columnLen, async); + if (!IsInitializedAndAtStart) + await MoveNextColumnAsync(resumableOp: false).ConfigureAwait(false); - _columnLen = int.MinValue; - _column++; + await PgReader.CommitAsync().ConfigureAwait(false); } #endregion #region Utilities - async Task ReadColumnLenIfNeeded(bool async) + bool IsInitializedAndAtStart => PgReader.Initialized && (PgReader.FieldIsDbNull || PgReader.FieldAtStart); + + void MoveNextColumn(bool resumableOp) { - if (_columnLen == int.MinValue) - { - await _buf.Ensure(4, async); - _columnLen = _buf.ReadInt32(); - _leftToReadInDataMsg -= 4; - } + PgReader.Commit(); + + if (_column + 1 == NumColumns) + ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); + _column++; + _buf.Ensure(sizeof(int)); + var columnLen = _buf.ReadInt32(); + PgReader.Init(DataFormat.Binary, columnLen, resumableOp); + } + + async ValueTask MoveNextColumnAsync(bool resumableOp) + { + await PgReader.CommitAsync().ConfigureAwait(false); + + if (_column + 1 == NumColumns) + ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); + _column++; + await _buf.Ensure(sizeof(int), async: true).ConfigureAwait(false); + var columnLen = _buf.ReadInt32(); + PgReader.Init(DataFormat.Binary, columnLen, resumableOp); + } + + void ThrowIfNotOnRow() + { + ThrowIfDisposed(); + if (_column is BeforeRow) + ThrowHelper.ThrowInvalidOperationException("Not reading a row"); } - void 
CheckDisposed() + void ThrowIfDisposed() { - if (_isDisposed) - throw new ObjectDisposedException(GetType().FullName, "The COPY operation has already ended."); + if (_state == ExporterState.Disposed) + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlBinaryExporter), "The COPY operation has already ended."); } #endregion @@ -382,7 +452,7 @@ void CheckDisposed() /// /// Cancels an ongoing export. /// - public void Cancel() => _connector.PerformUserCancellation(); + public void Cancel() => _connector.PerformImmediateUserCancellation(); /// /// Async cancels an ongoing export. @@ -396,72 +466,113 @@ public Task CancelAsync() /// /// Completes that binary export and sets the connection back to idle state /// - public void Dispose() => DisposeAsync(false).GetAwaiter().GetResult(); + public void Dispose() => DisposeAsync(async: false).GetAwaiter().GetResult(); /// /// Async completes that binary export and sets the connection back to idle state /// /// - public ValueTask DisposeAsync() - { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsync(true); - } + public ValueTask DisposeAsync() => DisposeAsync(async: true); async ValueTask DisposeAsync(bool async) { - if (_isDisposed) + if (_state == ExporterState.Disposed) return; - if (_isConsumed) - { - LogMessages.BinaryCopyOperationCompleted(_copyLogger, _rowsExported, _connector.Id); - } - else if (!_connector.IsBroken) + try { - try + if (_state is ExporterState.Consumed or ExporterState.Uninitialized) { - using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); - // Finish the current CopyData message - _buf.Skip(_leftToReadInDataMsg); - // Read to the end - _connector.SkipUntil(BackendMessageCode.CopyDone); - // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + 
LogMessages.BinaryCopyOperationCompleted(_copyLogger, _rowsExported, _connector.Id); + TraceExportStop(); } - catch (OperationCanceledException e) when (e.InnerException is PostgresException pg && pg.SqlState == PostgresErrorCodes.QueryCanceled) + else if (!_connector.IsBroken) { - LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); + try + { + using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); + // Be sure to commit the reader. + if (async) + await PgReader.CommitAsync().ConfigureAwait(false); + else + PgReader.Commit(); + // Finish the current CopyData message + await _buf.Skip(async, checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition))).ConfigureAwait(false); + // Read to the end + _connector.SkipUntil(BackendMessageCode.CopyDone); + // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + + TraceExportStop(); + } + catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) + { + LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); + TraceExportStop(); + } + catch (Exception e) + { + LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); + TraceSetException(e); + } } - catch (Exception e) + } + finally + { + _connector.EndUserAction(); + Cleanup(); + } + + void Cleanup() + { + Debug.Assert(_state != ExporterState.Disposed); + var connector = _connector; + + if (!ReferenceEquals(connector, null)) { - LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); + connector.CurrentCopyOperation = null; + _connector = null!; } - } - _connector.EndUserAction(); - Cleanup(); + _buf = null!; + _state = ExporterState.Disposed; + } } -#pragma warning disable CS8625 - void Cleanup() + 
#endregion + + #region Tracing + + void TraceExportStop() { - Debug.Assert(!_isDisposed); - var connector = _connector; + if (_activity is not null) + { + NpgsqlActivitySource.CopyStop(_activity, _rowsExported); + _activity = null; + } + } - if (connector != null) + void TraceSetException(Exception exception) + { + if (_activity is not null) { - connector.CurrentCopyOperation = null; - _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); - _connector = null; + NpgsqlActivitySource.SetException(_activity, exception); + _activity = null; } + } + + #endregion Tracing - _typeMapper = null; - _buf = null; - _isDisposed = true; + #region Enums + + enum ExporterState + { + Uninitialized, + Ready, + Consumed, + Disposed } -#pragma warning restore CS8625 - #endregion + #endregion Enums } diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 3da41700a5..ef73700089 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -1,12 +1,13 @@ using System; using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; using NpgsqlTypes; +using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; namespace Npgsql; @@ -25,7 +26,7 @@ public sealed class NpgsqlBinaryImporter : ICancelable NpgsqlConnector _connector; NpgsqlWriteBuffer _buf; - ImporterState _state; + ImporterState _state = ImporterState.Uninitialized; /// /// The number of columns in the current (not-yet-written) row. @@ -36,13 +37,16 @@ public sealed class NpgsqlBinaryImporter : ICancelable /// /// The number of columns, as returned from the backend in the CopyInResponse. 
/// - internal int NumColumns { get; private set; } + int NumColumns => _params.Length; bool InMiddleOfRow => _column != -1 && _column != NumColumns; NpgsqlParameter?[] _params; readonly ILogger _copyLogger; + PgWriter _pgWriter = null!; // Setup in Init + + Activity? _activity; /// /// Current timeout @@ -51,9 +55,9 @@ public TimeSpan Timeout { set { - _buf.Timeout = value; - // While calling Complete(), we're using the connector, which overwrites the buffer's timeout with it's own - _connector.UserTimeout = (int)value.TotalMilliseconds; + var timeout = value > TimeSpan.Zero ? value : InfiniteTimeout.InfiniteTimeSpan; + _buf.Timeout = timeout; + _connector.ReadBuffer.Timeout = timeout; } } @@ -72,38 +76,51 @@ internal NpgsqlBinaryImporter(NpgsqlConnector connector) internal async Task Init(string copyFromCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyFromCommand, async, cancellationToken); - await _connector.Flush(async, cancellationToken); - - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + Debug.Assert(_activity is null); + _activity = _connector.TraceCopyStart(copyFromCommand, "COPY FROM"); - CopyInResponseMessage copyInResponse; - var msg = await _connector.ReadMessage(async); - switch (msg.Code) + try { - case BackendMessageCode.CopyInResponse: - copyInResponse = (CopyInResponseMessage) msg; - if (!copyInResponse.IsBinary) + await _connector.WriteQuery(copyFromCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + CopyInResponseMessage copyInResponse; + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) { - throw _connector.Break( - new ArgumentException("copyFromCommand triggered a text 
transfer, only binary is allowed", - nameof(copyFromCommand))); + case BackendMessageCode.CopyInResponse: + copyInResponse = (CopyInResponseMessage)msg; + if (!copyInResponse.IsBinary) + { + throw _connector.Break( + new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed", + nameof(copyFromCommand))); + } + break; + case BackendMessageCode.CommandComplete: + throw new InvalidOperationException( + "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + + "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + + "Note that your data has been successfully imported/exported."); + default: + throw _connector.UnexpectedMessageReceived(msg.Code); } - break; - case BackendMessageCode.CommandComplete: - throw new InvalidOperationException( - "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + - "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + - "Note that your data has been successfully imported/exported."); - default: - throw _connector.UnexpectedMessageReceived(msg.Code); - } - NumColumns = copyInResponse.NumColumns; - _params = new NpgsqlParameter[NumColumns]; - _rowsImported = 0; - _buf.StartCopyMode(); - WriteHeader(); + _state = ImporterState.Ready; + _params = new NpgsqlParameter[copyInResponse.NumColumns]; + _rowsImported = 0; + _buf.StartCopyMode(); + WriteHeader(); + // Only init after header. + _pgWriter = _buf.GetWriter(_connector.DatabaseInfo); + } + catch (Exception e) + { + TraceSetException(e); + throw; + } } void WriteHeader() @@ -125,25 +142,20 @@ void WriteHeader() /// /// Starts writing a single row, must be invoked before writing any columns. 
/// - public Task StartRowAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return StartRow(true, cancellationToken); - } + public Task StartRowAsync(CancellationToken cancellationToken = default) => StartRow(async: true, cancellationToken); async Task StartRow(bool async, CancellationToken cancellationToken = default) { CheckReady(); - - if (_column != -1 && _column != NumColumns) - ThrowHelper.ThrowInvalidOperationException_BinaryImportParametersMismatch(NumColumns, _column); + cancellationToken.ThrowIfCancellationRequested(); + if (_column is not -1 && _column != NumColumns) + ThrowColumnMismatch(); if (_buf.WriteSpaceLeft < 2) - await _buf.Flush(async, cancellationToken); - _buf.WriteInt16(NumColumns); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); + _buf.WriteInt16((short)NumColumns); + _pgWriter.RefreshBuffer(); _column = 0; _rowsImported++; } @@ -157,7 +169,8 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. /// - public void Write([AllowNull] T value) => Write(value, false).GetAwaiter().GetResult(); + public void Write(T value) + => Write(async: false, value, npgsqlDbType: null, dataTypeName: null).GetAwaiter().GetResult(); /// /// Writes a single column in the current row. @@ -171,29 +184,8 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. 
/// - public Task WriteAsync([AllowNull] T value, CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return Write(value, true, cancellationToken); - } - - Task Write([AllowNull] T value, bool async, CancellationToken cancellationToken = default) - { - CheckColumnIndex(); - - var p = _params[_column]; - if (p == null) - { - // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) - ? new NpgsqlParameter() - : new NpgsqlParameter(); - } - - return Write(value, p, async, cancellationToken); - } + public Task WriteAsync(T value, CancellationToken cancellationToken = default) + => Write(async: true, value, npgsqlDbType: null, dataTypeName: null, cancellationToken); /// /// Writes a single column in the current row as type . @@ -206,8 +198,8 @@ Task Write([AllowNull] T value, bool async, CancellationToken cancellationTok /// must be specified as . /// /// The .NET type of the column to be written. - public void Write([AllowNull] T value, NpgsqlDbType npgsqlDbType) => - Write(value, npgsqlDbType, false).GetAwaiter().GetResult(); + public void Write(T value, NpgsqlDbType npgsqlDbType) => + Write(async: false, value, npgsqlDbType, dataTypeName: null).GetAwaiter().GetResult(); /// /// Writes a single column in the current row as type . @@ -223,33 +215,8 @@ public void Write([AllowNull] T value, NpgsqlDbType npgsqlDbType) => /// An optional token to cancel the asynchronous operation. The default value is . /// /// The .NET type of the column to be written. 
- public Task WriteAsync([AllowNull] T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return Write(value, npgsqlDbType, true, cancellationToken); - } - - Task Write([AllowNull] T value, NpgsqlDbType npgsqlDbType, bool async, CancellationToken cancellationToken = default) - { - CheckColumnIndex(); - - var p = _params[_column]; - if (p == null) - { - // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) - ? new NpgsqlParameter() - : new NpgsqlParameter(); - p.NpgsqlDbType = npgsqlDbType; - } - - if (npgsqlDbType != p.NpgsqlDbType) - throw new InvalidOperationException($"Can't change {nameof(p.NpgsqlDbType)} from {p.NpgsqlDbType} to {npgsqlDbType}"); - - return Write(value, p, async, cancellationToken); - } + public Task WriteAsync(T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) + => Write(async: true, value, npgsqlDbType, dataTypeName: null, cancellationToken); /// /// Writes a single column in the current row as type . @@ -260,8 +227,8 @@ Task Write([AllowNull] T value, NpgsqlDbType npgsqlDbType, bool async, Cancel /// the database. This parameter and be used to unambiguously specify the type. /// /// The .NET type of the column to be written. - public void Write([AllowNull] T value, string dataTypeName) => - Write(value, dataTypeName, false).GetAwaiter().GetResult(); + public void Write(T value, string dataTypeName) => + Write(async: false, value, npgsqlDbType: null, dataTypeName).GetAwaiter().GetResult(); /// /// Writes a single column in the current row as type . @@ -275,66 +242,93 @@ public void Write([AllowNull] T value, string dataTypeName) => /// An optional token to cancel the asynchronous operation. The default value is . /// /// The .NET type of the column to be written. 
- public Task WriteAsync([AllowNull] T value, string dataTypeName, CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return Write(value, dataTypeName, true, cancellationToken); - } + public Task WriteAsync(T value, string dataTypeName, CancellationToken cancellationToken = default) + => Write(async: true, value, npgsqlDbType: null, dataTypeName, cancellationToken); - Task Write([AllowNull] T value, string dataTypeName, bool async, CancellationToken cancellationToken = default) + Task Write(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTypeName, CancellationToken cancellationToken = default) { - CheckColumnIndex(); + // Handle DBNull: + // 1. when T = DBNull for backwards compatibility, DBNull as a type normally won't find a mapping. + // 2. when T = object we resolve oid 0 if DBNull is the first value, later column value oids would needlessly be limited to oid 0. + // Also handle null values for object typed parameters, these parameters require non null values to be seen as set. + if (typeof(T) == typeof(DBNull) || (typeof(T) == typeof(object) && value is null or DBNull)) + return WriteNull(async, cancellationToken); - var p = _params[_column]; - if (p == null) + return Core(async, value, npgsqlDbType, dataTypeName, cancellationToken); + + async Task Core(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTypeName, CancellationToken cancellationToken = default) { - // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) - ? new NpgsqlParameter() - : new NpgsqlParameter(); - p.DataTypeName = dataTypeName; - } + CheckReady(); + cancellationToken.ThrowIfCancellationRequested(); + CheckColumnIndex(); + + // Create the parameter objects for the first row or if the value type changes. 
+ var newParam = false; + if (_params[_column] is not NpgsqlParameter param) + { + newParam = true; + param = new NpgsqlParameter(); + if (npgsqlDbType is not null) + param._npgsqlDbType = npgsqlDbType; + if (dataTypeName is not null) + param._dataTypeName = dataTypeName; + } - //if (dataTypeName!= p.DataTypeName) - // throw new InvalidOperationException($"Can't change {nameof(p.DataTypeName)} from {p.DataTypeName} to {dataTypeName}"); + // We only retrieve previous values if anything actually changed. + // For object typed parameters we must do so whenever setting NpgsqlParameter.Value would reset the type info. + PgTypeInfo? previousTypeInfo = null; + PgConcreteTypeInfo? previousConcreteTypeInfo = null; + if (!newParam && ( + (typeof(T) == typeof(object) && param.ShouldResetObjectTypeInfo(value)) + || param._npgsqlDbType != npgsqlDbType + || param._dataTypeName != dataTypeName)) + { + param.GetResolutionInfo(out previousTypeInfo, out previousConcreteTypeInfo); + if (!newParam) + { + param.ResetDbType(); + if (npgsqlDbType is not null) + param._npgsqlDbType = npgsqlDbType; + if (dataTypeName is not null) + param._dataTypeName = dataTypeName; + } + } - return Write(value, p, async, cancellationToken); - } + // These actions can reset or change the type info, we'll check afterwards whether we're still consistent with the original values. + param.TypedValue = value; + param.ResolveTypeInfo(_connector.SerializerOptions, _connector.DbTypeResolver); - async Task Write([AllowNull] T value, NpgsqlParameter param, bool async, CancellationToken cancellationToken = default) - { - CheckReady(); - if (_column == -1) - throw new InvalidOperationException("A row hasn't been started"); + if (previousTypeInfo is not null && previousConcreteTypeInfo is not null && param.PgTypeId != previousConcreteTypeInfo.PgTypeId) + { + var currentPgTypeId = param.PgTypeId; + // We should only rollback values when the stored instance was used. 
We'll throw before writing the new instance back anyway. + // Also always rolling back could set PgTypeInfos that were resolved for a type that doesn't match the T of the NpgsqlParameter. + if (!newParam) + param.SetResolutionInfo(previousTypeInfo, previousConcreteTypeInfo); + throw new InvalidOperationException($"Write for column {_column} resolves to a different PostgreSQL type: {currentPgTypeId} than the first row resolved to ({previousConcreteTypeInfo.PgTypeId}). " + + $"Please make sure to use clr types that resolve to the same PostgreSQL type across rows. " + + $"Alternatively pass the same NpgsqlDbType or DataTypeName to ensure the PostgreSQL type ends up to be identical." ); + } - if (value == null || value is DBNull) - { - await WriteNull(async, cancellationToken); - return; - } + if (newParam) + _params[_column] = param; - if (typeof(T) == typeof(object)) - { - param.Value = value; - } - else - { - if (param is not NpgsqlParameter typedParam) + param.Bind(out _, out _, requiredFormat: DataFormat.Binary); + + try { - _params[_column] = typedParam = new NpgsqlParameter(); - typedParam.NpgsqlDbType = param.NpgsqlDbType; - param = typedParam; + await param.Write(async, _pgWriter.WithFlushMode(async ? FlushMode.NonBlocking : FlushMode.Blocking), cancellationToken) + .ConfigureAwait(false); } - typedParam.TypedValue = value; + catch (Exception ex) + { + TraceSetException(ex); + _connector.Break(ex); + throw; + } + + _column++; } - param.ResolveHandler(_connector.TypeMapper); - param.ValidateAndGetLength(); - param.LengthCache?.Rewind(); - await param.WriteWithLength(_buf, async, cancellationToken); - param.LengthCache?.Clear(); - _column++; } /// @@ -345,24 +339,20 @@ async Task Write([AllowNull] T value, NpgsqlParameter param, bool async, Canc /// /// Writes a single null column value. 
/// - public Task WriteNullAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return WriteNull(true, cancellationToken); - } + public Task WriteNullAsync(CancellationToken cancellationToken = default) => WriteNull(async: true, cancellationToken); async Task WriteNull(bool async, CancellationToken cancellationToken = default) { CheckReady(); - if (_column == -1) - throw new InvalidOperationException("A row hasn't been started"); + if (cancellationToken.IsCancellationRequested) + cancellationToken.ThrowIfCancellationRequested(); + CheckColumnIndex(); if (_buf.WriteSpaceLeft < 4) - await _buf.Flush(async, cancellationToken); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.WriteInt32(-1); + _pgWriter.RefreshBuffer(); _column++; } @@ -372,7 +362,7 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) /// on each value. /// /// An array of column values to be written as a single row - public void WriteRow(params object[] values) => WriteRow(false, CancellationToken.None, values).GetAwaiter().GetResult(); + public void WriteRow(params object?[] values) => WriteRow(false, CancellationToken.None, values).GetAwaiter().GetResult(); /// /// Writes an entire row of columns. @@ -383,25 +373,30 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// An array of column values to be written as a single row - public Task WriteRowAsync(CancellationToken cancellationToken = default, params object[] values) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return WriteRow(true, cancellationToken, values); - } + public Task WriteRowAsync(CancellationToken cancellationToken = default, params object?[] values) + => WriteRow(async: true, cancellationToken, values); - async Task WriteRow(bool async, CancellationToken cancellationToken = default, params object[] values) + async Task WriteRow(bool async, CancellationToken cancellationToken = default, params object?[] values) { - await StartRow(async, cancellationToken); + await StartRow(async, cancellationToken).ConfigureAwait(false); foreach (var value in values) - await Write(value, async, cancellationToken); + await Write(async, value, npgsqlDbType: null, dataTypeName: null, cancellationToken).ConfigureAwait(false); } void CheckColumnIndex() { - if (_column >= NumColumns) - ThrowHelper.ThrowInvalidOperationException_BinaryImportParametersMismatch(NumColumns, _column + 1); + if (_column is -1 || _column >= NumColumns) + Throw(); + + [MethodImpl(MethodImplOptions.NoInlining)] + void Throw() + { + if (_column is -1) + throw new InvalidOperationException("A row hasn't been started"); + + if (_column >= NumColumns) + ThrowColumnMismatch(); + } } #endregion @@ -416,13 +411,7 @@ void CheckColumnIndex() /// /// Completes the import operation. The writer is unusable after this operation. 
/// - public ValueTask CompleteAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return new ValueTask(Task.FromCanceled(cancellationToken)); - using (NoSynchronizationContextScope.Enter()) - return Complete(true, cancellationToken); - } + public ValueTask CompleteAsync(CancellationToken cancellationToken = default) => Complete(async: true, cancellationToken); async ValueTask Complete(bool async, CancellationToken cancellationToken = default) { @@ -432,24 +421,29 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken if (InMiddleOfRow) { - await Cancel(async, cancellationToken); + await Cancel(async, cancellationToken).ConfigureAwait(false); throw new InvalidOperationException("Binary importer closed in the middle of a row, cancelling import."); } try { - await WriteTrailer(async, cancellationToken); - await _buf.Flush(async, cancellationToken); + // Write trailer + if (_buf.WriteSpaceLeft < 2) + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); + _buf.WriteInt16(-1); + + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.EndCopyMode(); - await _connector.WriteCopyDone(async, cancellationToken); - await _connector.Flush(async, cancellationToken); - var cmdComplete = Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + await _connector.WriteCopyDone(async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + var cmdComplete = Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); _state = ImporterState.Committed; return cmdComplete.Rows; } - catch + catch (Exception e) { + TraceSetException(e); Cleanup(); throw; } @@ -457,7 +451,7 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken void 
ICancelable.Cancel() => Close(); - async Task ICancelable.CancelAsync() => await CloseAsync(); + async Task ICancelable.CancelAsync() => await CloseAsync().ConfigureAwait(false); /// /// @@ -465,8 +459,8 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// public void Dispose() => Close(); @@ -476,26 +470,22 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// - public ValueTask DisposeAsync() - { - using (NoSynchronizationContextScope.Enter()) - return CloseAsync(true); - } + public ValueTask DisposeAsync() => CloseAsync(true); async Task Cancel(bool async, CancellationToken cancellationToken = default) { _state = ImporterState.Cancelled; _buf.Clear(); _buf.EndCopyMode(); - await _connector.WriteCopyFail(async, cancellationToken); - await _connector.Flush(async, cancellationToken); + await _connector.WriteCopyFail(async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); try { using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); // The CopyFail should immediately trigger an exception from the read above. 
throw _connector.Break( new NpgsqlException("Expected ErrorResponse when cancelling COPY but got: " + msg.Code)); @@ -513,10 +503,10 @@ async Task Cancel(bool async, CancellationToken cancellationToken = default) /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// - public void Close() => CloseAsync(false).GetAwaiter().GetResult(); + public void Close() => CloseAsync(async: false).GetAwaiter().GetResult(); /// /// @@ -524,26 +514,22 @@ async Task Cancel(bool async, CancellationToken cancellationToken = default) /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// - public ValueTask CloseAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return new ValueTask(Task.FromCanceled(cancellationToken)); - using (NoSynchronizationContextScope.Enter()) - return CloseAsync(true, cancellationToken); - } + public ValueTask CloseAsync(CancellationToken cancellationToken = default) => CloseAsync(async: true, cancellationToken); async ValueTask CloseAsync(bool async, CancellationToken cancellationToken = default) { + cancellationToken.ThrowIfCancellationRequested(); switch (_state) { case ImporterState.Disposed: return; case ImporterState.Ready: - await Cancel(async, cancellationToken); + await Cancel(async, cancellationToken).ConfigureAwait(false); break; + case ImporterState.Uninitialized: case ImporterState.Cancelled: case ImporterState.Committed: break; @@ -551,7 +537,7 @@ async ValueTask CloseAsync(bool async, CancellationToken cancellationToken = def throw new Exception("Invalid state: " + _state); } - _connector.EndUserAction(); + TraceImportStop(); Cleanup(); } @@ -566,38 +552,42 @@ void Cleanup() if (connector != null) { + connector.EndUserAction(); connector.CurrentCopyOperation = 
null; - _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null; } + // Deterministically release each parameter's provider-produced write state and binding state. + // ResetDbType cascades through ResetTypeInfo which disposes both; clearing the type hints is + // incidental (the params aren't reused after the importer closes). GC would eventually catch + // anything we miss, but we'd rather not leak pooled buffers held in write state. + if (_params is not null) + { + foreach (var p in _params) + p?.ResetDbType(); + } + _buf = null; _state = ImporterState.Disposed; } #pragma warning restore CS8625 - async Task WriteTrailer(bool async, CancellationToken cancellationToken = default) - { - if (_buf.WriteSpaceLeft < 2) - await _buf.Flush(async, cancellationToken); - _buf.WriteInt16(-1); - } - void CheckReady() { - switch (_state) - { - case ImporterState.Ready: - return; - case ImporterState.Disposed: - throw new ObjectDisposedException(GetType().FullName, "The COPY operation has already ended."); - case ImporterState.Cancelled: - throw new InvalidOperationException("The COPY operation has already been cancelled."); - case ImporterState.Committed: - throw new InvalidOperationException("The COPY operation has already been committed."); - default: - throw new Exception("Invalid state: " + _state); - } + if (_state is not ImporterState.Ready and var state) + Throw(state); + + [MethodImpl(MethodImplOptions.NoInlining)] + static void Throw(ImporterState state) + => throw (state switch + { + ImporterState.Uninitialized => throw new InvalidOperationException("The COPY operation has not been initialized."), + ImporterState.Disposed => new ObjectDisposedException(typeof(NpgsqlBinaryImporter).FullName, + "The COPY operation has already ended."), + ImporterState.Cancelled => new InvalidOperationException("The COPY operation has already been cancelled."), + ImporterState.Committed => new InvalidOperationException("The COPY operation has already been 
committed."), + _ => new Exception("Invalid state: " + state) + }); } #endregion @@ -606,6 +596,7 @@ void CheckReady() enum ImporterState { + Uninitialized, Ready, Committed, Cancelled, @@ -613,4 +604,41 @@ enum ImporterState } #endregion Enums -} \ No newline at end of file + + void ThrowColumnMismatch() + => throw new InvalidOperationException($"The binary import operation was started with {NumColumns} column(s), but {_column + 1} value(s) were provided."); + + #region Tracing + + void TraceImportStop() + { + if (_activity is not null) + { + switch (_state) + { + case ImporterState.Committed: + NpgsqlActivitySource.CopyStop(_activity, _rowsImported); + break; + case ImporterState.Cancelled: + NpgsqlActivitySource.CopyStop(_activity, rows: 0); + break; + default: + Debug.Fail("Invalid state: " + _state); + break; + } + + _activity = null; + } + } + + void TraceSetException(Exception exception) + { + if (_activity is not null) + { + NpgsqlActivitySource.SetException(_activity, exception); + _activity = null; + } + } + + #endregion Tracing +} diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index e3d07c3ae9..c8fef976b2 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -4,7 +4,6 @@ using System.Data; using System.Data.Common; using System.Diagnostics; -using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading; @@ -17,6 +16,7 @@ using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Properties; +using System.Collections; namespace Npgsql; @@ -44,16 +44,13 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent string _commandText; CommandBehavior _behavior; int? _timeout; - readonly NpgsqlParameterCollection _parameters; + internal NpgsqlParameterCollection? _parameters; - /// - /// Whether this is wrapped by an . - /// - internal bool IsWrappedByBatch { get; } + internal NpgsqlBatch? 
WrappingBatch { get; } internal List InternalBatchCommands { get; } - Activity? CurrentActivity; + internal Activity? CurrentActivity { get; private set; } /// /// Returns details about each statement that this command has executed. @@ -69,7 +66,7 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent /// /// Whether this command is cached by and returned by . /// - internal bool IsCached { get; set; } + internal bool IsCacheable { get; set; } #if DEBUG internal static bool EnableSqlRewriting; @@ -81,9 +78,8 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent internal bool EnableErrorBarriers { get; set; } - static readonly List EmptyParameters = new(); - - static readonly SingleThreadSynchronizationContext SingleThreadSynchronizationContext = new("NpgsqlRemainingAsyncSendWorker"); + static readonly TaskScheduler ConstrainedConcurrencyScheduler = + new ConcurrentExclusiveSchedulerPair(TaskScheduler.Default, Math.Max(1, Environment.ProcessorCount / 2)).ConcurrentScheduler; #endregion Fields @@ -124,7 +120,6 @@ public NpgsqlCommand(string? cmdText, NpgsqlConnection? connection) { GC.SuppressFinalize(this); InternalBatchCommands = new List(1); - _parameters = new NpgsqlParameterCollection(); _commandText = cmdText ?? string.Empty; InternalConnection = connection; CommandType = CommandType.Text; @@ -144,13 +139,13 @@ public NpgsqlCommand(string? cmdText, NpgsqlConnection? connection, NpgsqlTransa /// /// Used when this instance is wrapped inside an . /// - internal NpgsqlCommand(int batchCommandCapacity, NpgsqlConnection? connection = null) + internal NpgsqlCommand(NpgsqlBatch batch, int batchCommandCapacity, NpgsqlConnection? 
connection = null) { GC.SuppressFinalize(this); InternalBatchCommands = new List(batchCommandCapacity); InternalConnection = connection; CommandType = CommandType.Text; - IsWrappedByBatch = true; + WrappingBatch = batch; // These can/should never be used in this mode _commandText = null!; @@ -163,12 +158,12 @@ internal NpgsqlCommand(string? cmdText, NpgsqlConnector connector) : this(cmdTex /// /// Used when this instance is wrapped inside an . /// - internal NpgsqlCommand(NpgsqlConnector connector, int batchCommandCapacity) - : this(batchCommandCapacity) + internal NpgsqlCommand(NpgsqlBatch batch, NpgsqlConnector connector, int batchCommandCapacity) + : this(batch, batchCommandCapacity) => _connector = connector; internal static NpgsqlCommand CreateCachedCommand(NpgsqlConnection connection) - => new(null, connection) { IsCached = true }; + => new(null, connection) { IsCacheable = true }; #endregion Constructors @@ -185,17 +180,48 @@ public override string CommandText get => _commandText; set { - Debug.Assert(!IsWrappedByBatch); + Debug.Assert(WrappingBatch is null); + + switch (State) + { + case CommandState.Idle: + break; + case CommandState.Disposed: + ThrowHelper.ThrowObjectDisposedException(typeof(NpgsqlCommand).FullName); + break; + case CommandState.InProgress: + default: + ThrowHelper.ThrowInvalidOperationException("An open data reader exists for this command."); + break; + } - _commandText = State == CommandState.Idle - ? value ?? string.Empty - : throw new InvalidOperationException("An open data reader exists for this command."); + _commandText = value ?? string.Empty; ResetPreparation(); // TODO: Technically should do this also if the parameter list (or type) changes } } + string GetBatchFullCommandText() + { + Debug.Assert(WrappingBatch is not null); + if (InternalBatchCommands.Count == 0) + return string.Empty; + if (InternalBatchCommands.Count == 1) + return InternalBatchCommands[0].CommandText; + // TODO: Potentially cache on connector/command? 
+ var sb = new StringBuilder(); + sb.Append(InternalBatchCommands[0].CommandText); + for (var i = 1; i < InternalBatchCommands.Count; i++) + { + sb + .Append(';') + .AppendLine() + .Append(InternalBatchCommands[i].CommandText); + } + return sb.ToString(); + } + /// /// Gets or sets the wait time (in seconds) before terminating the attempt to execute a command and generating an error. /// @@ -206,9 +232,7 @@ public override int CommandTimeout get => _timeout ?? (InternalConnection?.CommandTimeout ?? DefaultTimeout); set { - if (value < 0) { - throw new ArgumentOutOfRangeException(nameof(value), value, "CommandTimeout can't be less than zero."); - } + ArgumentOutOfRangeException.ThrowIfNegative(value); _timeout = value; } @@ -237,9 +261,12 @@ protected override DbConnection? DbConnection if (InternalConnection == value) return; - InternalConnection = State == CommandState.Idle - ? (NpgsqlConnection?)value - : throw new InvalidOperationException("An open data reader exists for this command."); + InternalConnection = State switch + { + CommandState.Idle => (NpgsqlConnection?)value, + CommandState.Disposed => throw new ObjectDisposedException(typeof(NpgsqlCommand).FullName), + _ => throw new InvalidOperationException("An open data reader exists for this command."), + }; Transaction = null; } @@ -291,9 +318,24 @@ public override UpdateRowSource UpdatedRowSource /// /// Returns whether this query will execute as a prepared (compiled) query. /// - public bool IsPrepared => - _connectorPreparedOn == (InternalConnection?.Connector ?? _connector) && - InternalBatchCommands.Any() && InternalBatchCommands.All(s => s.PreparedStatement?.IsPrepared == true); + public bool IsPrepared + { + get + { + return _connectorPreparedOn == (InternalConnection?.Connector ?? 
_connector) && AllPrepared(); + + bool AllPrepared() + { + if (InternalBatchCommands.Count is 0) + return false; + + foreach (var s in InternalBatchCommands) + if (s.PreparedStatement is null || !s.PreparedStatement.IsPrepared) + return false; + return true; + } + } + } #endregion Public properties @@ -344,18 +386,6 @@ public bool[]? UnknownResultTypeList #endregion - #region Result Types Management - - /// - /// Marks result types to be used when using GetValue on a data reader, on a column-by-column basis. - /// Used for Entity Framework 5-6 compability. - /// Only primitive numerical types and DateTimeOffset are supported. - /// Set the whole array or just a value to null to use default type. - /// - internal Type[]? ObjectResultTypes { get; set; } - - #endregion - #region State management volatile int _state; @@ -375,7 +405,12 @@ internal CommandState State } } - internal void ResetPreparation() => _connectorPreparedOn = null; + internal void ResetPreparation() + { + _connectorPreparedOn = null; + foreach (var s in InternalBatchCommands) + s.ResetPreparation(); + } #endregion State management @@ -402,7 +437,7 @@ internal CommandState State /// Gets the . /// /// The parameters of the SQL statement or function (stored procedure). The default is an empty collection. 
- public new NpgsqlParameterCollection Parameters => _parameters; + public new NpgsqlParameterCollection Parameters => _parameters ??= []; #endregion @@ -434,14 +469,14 @@ internal void DeriveParameters() { var conn = CheckAndGetConnection(); Debug.Assert(conn is not null); + var connector = conn.Connector!; if (string.IsNullOrEmpty(CommandText)) throw new InvalidOperationException("CommandText property has not been initialized"); - using var _ = conn.StartTemporaryBindingScope(out var connector); - - if (InternalBatchCommands.Any(s => s.PreparedStatement?.IsExplicit == true)) - throw new NpgsqlException("Deriving parameters isn't supported for commands that are already prepared."); + foreach (var s in InternalBatchCommands) + if (s.PreparedStatement?.IsExplicit == true) + throw new NpgsqlException("Deriving parameters isn't supported for commands that are already prepared."); // Here we unprepare statements that possibly are auto-prepared Unprepare(); @@ -492,14 +527,14 @@ void DeriveParametersForFunction() throw new InvalidOperationException($"{CommandText} does not exist in pg_proc"); } - var typeMapper = c.InternalConnection!.Connector!.TypeMapper; + var serializerOptions = c.InternalConnection!.Connector!.SerializerOptions; for (var i = 0; i < types.Length; i++) { var param = new NpgsqlParameter(); - var (npgsqlDbType, postgresType) = typeMapper.GetTypeInfoByOid(types[i]); - + var postgresType = serializerOptions.DatabaseInfo.GetPostgresType(types[i]); + var npgsqlDbType = postgresType.DataTypeName.ToNpgsqlDbType(); param.DataTypeName = postgresType.DisplayName; param.PostgresType = postgresType; if (npgsqlDbType.HasValue) @@ -535,7 +570,7 @@ void DeriveParametersForQuery(NpgsqlConnector connector) { LogMessages.DerivingParameters(connector.CommandLogger, CommandText, connector.Id); - if (IsWrappedByBatch) + if (WrappingBatch is not null) foreach (var batchCommand in InternalBatchCommands) connector.SqlQueryParser.ParseRawQuery(batchCommand, 
connector.UseConformingStrings, deriveParameters: true); else @@ -545,58 +580,75 @@ void DeriveParametersForQuery(NpgsqlConnector connector) if (sendTask.IsFaulted) sendTask.GetAwaiter().GetResult(); - foreach (var batchCommand in InternalBatchCommands) + try { - Expect( - connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); - var paramTypeOIDs = Expect( - connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector).TypeOIDs; - - if (batchCommand.PositionalParameters.Count != paramTypeOIDs.Count) + foreach (var batchCommand in InternalBatchCommands) { - connector.SkipUntil(BackendMessageCode.ReadyForQuery); - Parameters.Clear(); - throw new NpgsqlException("There was a mismatch in the number of derived parameters between the Npgsql SQL parser and the PostgreSQL parser. Please report this as bug to the Npgsql developers (https://github.com/npgsql/npgsql/issues)."); - } + Expect( + connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); + var paramTypeOIDs = Expect( + connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector).TypeOIDs; - for (var i = 0; i < paramTypeOIDs.Count; i++) - { - try + if (batchCommand.PositionalParameters.Count != paramTypeOIDs.Count) { - var param = batchCommand.PositionalParameters[i]; - var paramOid = paramTypeOIDs[i]; - - var (npgsqlDbType, postgresType) = connector.TypeMapper.GetTypeInfoByOid(paramOid); - - if (param.NpgsqlDbType != NpgsqlDbType.Unknown && param.NpgsqlDbType != npgsqlDbType) - throw new NpgsqlException("The backend parser inferred different types for parameters with the same name. Please try explicit casting within your SQL statement or batch or use different placeholder names."); + connector.SkipUntil(BackendMessageCode.ReadyForQuery); + Parameters.Clear(); + throw new NpgsqlException( + "There was a mismatch in the number of derived parameters between the Npgsql SQL parser and the PostgreSQL parser. 
Please report this as bug to the Npgsql developers (https://github.com/npgsql/npgsql/issues)."); + } - param.DataTypeName = postgresType.DisplayName; - param.PostgresType = postgresType; - if (npgsqlDbType.HasValue) - param.NpgsqlDbType = npgsqlDbType.Value; + for (var i = 0; i < paramTypeOIDs.Count; i++) + { + try + { + var param = batchCommand.PositionalParameters[i]; + var paramOid = paramTypeOIDs[i]; + + var postgresType = connector.SerializerOptions.DatabaseInfo.GetPostgresType(paramOid); + // We want to keep any domain types visible on the parameter, it will internally do a representational lookup again if necessary. + var npgsqlDbType = postgresType.GetRepresentationalType().DataTypeName.ToNpgsqlDbType(); + if (param.NpgsqlDbType != NpgsqlDbType.Unknown && param.NpgsqlDbType != npgsqlDbType) + throw new NpgsqlException( + "The backend parser inferred different types for parameters with the same name. Please try explicit casting within your SQL statement or batch or use different placeholder names."); + + param.DataTypeName = postgresType.DisplayName; + param.PostgresType = postgresType; + if (npgsqlDbType.HasValue) + param.NpgsqlDbType = npgsqlDbType.Value; + } + catch + { + connector.SkipUntil(BackendMessageCode.ReadyForQuery); + Parameters.Clear(); + throw; + } } - catch + + var msg = connector.ReadMessage(async: false).GetAwaiter().GetResult(); + switch (msg.Code) { - connector.SkipUntil(BackendMessageCode.ReadyForQuery); - Parameters.Clear(); - throw; + case BackendMessageCode.RowDescription: + case BackendMessageCode.NoData: + break; + default: + throw connector.UnexpectedMessageReceived(msg.Code); } } - var msg = connector.ReadMessage(async: false).GetAwaiter().GetResult(); - switch (msg.Code) + Expect(connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); + } + finally + { + try { - case BackendMessageCode.RowDescription: - case BackendMessageCode.NoData: - break; - default: - throw connector.UnexpectedMessageReceived(msg.Code); + 
// Make sure sendTask is complete so we don't race against asynchronous flush + sendTask.GetAwaiter().GetResult(); + } + catch + { + // ignored } } - - Expect(connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); - sendTask.GetAwaiter().GetResult(); } } @@ -617,43 +669,41 @@ void DeriveParametersForQuery(NpgsqlConnector connector) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NETSTANDARD2_0 - public virtual Task PrepareAsync(CancellationToken cancellationToken = default) -#else public override Task PrepareAsync(CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Prepare(true, cancellationToken); - } + => Prepare(async: true, cancellationToken); Task Prepare(bool async, CancellationToken cancellationToken = default) { var connection = CheckAndGetConnection(); Debug.Assert(connection is not null); - if (connection.Settings.Multiplexing) - throw new NotSupportedException("Explicit preparation not supported with multiplexing"); var connector = connection.Connector!; var logger = connector.CommandLogger; var needToPrepare = false; - if (IsWrappedByBatch) + if (WrappingBatch is not null) { foreach (var batchCommand in InternalBatchCommands) { - batchCommand.Parameters.ProcessParameters(connector.TypeMapper, validateValues: false, CommandType); + batchCommand._parameters?.ProcessParameters(connector.ReloadableState, validateValues: false, batchCommand.CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); needToPrepare = batchCommand.ExplicitPrepare(connector) || needToPrepare; + batchCommand.ConnectorPreparedOn = connector; } if (logger.IsEnabled(LogLevel.Debug) && needToPrepare) - LogMessages.PreparingCommandExplicitly(logger, string.Join("; ", InternalBatchCommands.Select(c => c.CommandText)), connector.Id); + LogMessages.PreparingCommandExplicitly(logger, string.Join("; ", CommandTexts()), 
connector.Id); + + IEnumerable CommandTexts() + { + foreach (var c in InternalBatchCommands) + yield return c.CommandText; + } } else { - Parameters.ProcessParameters(connector.TypeMapper, validateValues: false, CommandType); + _parameters?.ProcessParameters(connector.ReloadableState, validateValues: false, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); foreach (var batchCommand in InternalBatchCommands) @@ -681,53 +731,71 @@ static async Task PrepareLong(NpgsqlCommand command, bool async, NpgsqlConnector if (sendTask.IsFaulted) sendTask.GetAwaiter().GetResult(); - // Loop over statements, skipping those that are already prepared (because they were persisted) - var isFirst = true; - foreach (var batchCommand in command.InternalBatchCommands) + try { - if (!batchCommand.IsPreparing) - continue; + // Loop over statements, skipping those that are already prepared (because they were persisted) + var isFirst = true; + foreach (var batchCommand in command.InternalBatchCommands) + { + if (!batchCommand.IsPreparing) + continue; - var pStatement = batchCommand.PreparedStatement!; + var pStatement = batchCommand.PreparedStatement!; + var replacedStatement = pStatement.StatementBeingReplaced; - if (pStatement.StatementBeingReplaced != null) - { - Expect(await connector.ReadMessage(async), connector); - pStatement.StatementBeingReplaced.CompleteUnprepare(); - pStatement.StatementBeingReplaced = null; + if (replacedStatement != null) + { + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + replacedStatement.CompleteUnprepare(); + + if (!replacedStatement.IsExplicit) + connector.PreparedStatementManager.AutoPrepared[replacedStatement.AutoPreparedSlotIndex] = null; + + pStatement.StatementBeingReplaced = null; + } + + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + var msg = await 
connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) + { + case BackendMessageCode.RowDescription: + // Clone the RowDescription for use with the prepared statement (the one we have is reused + // by the connection) + var description = ((RowDescriptionMessage)msg).Clone(); + command.FixupRowDescription(description, isFirst); + batchCommand.Description = description; + break; + case BackendMessageCode.NoData: + batchCommand.Description = null; + break; + default: + throw connector.UnexpectedMessageReceived(msg.Code); + } + + pStatement.State = PreparedState.Prepared; + connector.PreparedStatementManager.NumPrepared++; + batchCommand.IsPreparing = false; + isFirst = false; } - Expect(await connector.ReadMessage(async), connector); - Expect(await connector.ReadMessage(async), connector); - var msg = await connector.ReadMessage(async); - switch (msg.Code) + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + } + finally + { + try { - case BackendMessageCode.RowDescription: - // Clone the RowDescription for use with the prepared statement (the one we have is reused - // by the connection) - var description = ((RowDescriptionMessage)msg).Clone(); - command.FixupRowDescription(description, isFirst); - batchCommand.Description = description; - break; - case BackendMessageCode.NoData: - batchCommand.Description = null; - break; - default: - throw connector.UnexpectedMessageReceived(msg.Code); + // Make sure sendTask is complete so we don't race against asynchronous flush + if (async) + await sendTask.ConfigureAwait(false); + else + sendTask.GetAwaiter().GetResult(); + } + catch + { + // ignored } - - pStatement.State = PreparedState.Prepared; - connector.PreparedStatementManager.NumPrepared++; - batchCommand.IsPreparing = false; - isFirst = false; } - - Expect(await connector.ReadMessage(async), connector); - - if (async) - await sendTask; - else - sendTask.GetAwaiter().GetResult(); } 
LogMessages.CommandPreparedExplicitly(connector.CommandLogger, connector.Id); @@ -766,18 +834,21 @@ public void Unprepare() /// An optional token to cancel the asynchronous operation. The default value is . /// public Task UnprepareAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Unprepare(true, cancellationToken); - } + => Unprepare(async: true, cancellationToken); async Task Unprepare(bool async, CancellationToken cancellationToken = default) { var connection = CheckAndGetConnection(); Debug.Assert(connection is not null); - if (connection.Settings.Multiplexing) - throw new NotSupportedException("Explicit preparation not supported with multiplexing"); - if (InternalBatchCommands.All(s => !s.IsPrepared)) + + var forall = true; + foreach (var statement in InternalBatchCommands) + if (statement.IsPrepared) + { + forall = false; + break; + } + if (forall) return; var connector = connection.Connector!; @@ -786,15 +857,14 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) using (connector.StartUserAction(cancellationToken)) { - var sendTask = SendClose(connector, async, cancellationToken); - if (sendTask.IsFaulted) - sendTask.GetAwaiter().GetResult(); + // Just wait for SendClose to complete since each statement takes no more than 20 bytes + await SendClose(connector, async, cancellationToken).ConfigureAwait(false); foreach (var batchCommand in InternalBatchCommands) { if (batchCommand.PreparedStatement?.State == PreparedState.BeingUnprepared) { - Expect(await connector.ReadMessage(async), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); var pStatement = batchCommand.PreparedStatement; pStatement.CompleteUnprepare(); @@ -802,16 +872,11 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) if (!pStatement.IsExplicit) connector.PreparedStatementManager.AutoPrepared[pStatement.AutoPreparedSlotIndex] = null; 
- batchCommand.PreparedStatement = null; + batchCommand.ResetPreparation(); } } - Expect(await connector.ReadMessage(async), connector); - - if (async) - await sendTask; - else - sendTask.GetAwaiter().GetResult(); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); } } @@ -819,19 +884,19 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) #region Query analysis - internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStrings, NpgsqlBatchCommand? batchCommand) + internal void ProcessRawQuery(SqlQueryParser parser, bool standardConformingStrings, NpgsqlBatchCommand? batchCommand) { var (commandText, commandType, parameters) = batchCommand is null - ? (CommandText, CommandType, Parameters) - : (batchCommand.CommandText, batchCommand.CommandType, batchCommand.Parameters); + ? (CommandText, CommandType, _parameters) + : (batchCommand.CommandText, batchCommand.CommandType, batchCommand._parameters); if (string.IsNullOrEmpty(commandText)) - throw new InvalidOperationException("CommandText property has not been initialized"); + ThrowHelper.ThrowInvalidOperationException("CommandText property has not been initialized"); switch (commandType) { case CommandType.Text: - switch (parameters.PlaceholderType) + switch (parameters?.PlaceholderType ?? PlaceholderType.NoParameters) { case PlaceholderType.Positional: // In positional parameter mode, we don't need to parse/rewrite the CommandText or reorder the parameters - just use @@ -841,18 +906,28 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr { batchCommand = TruncateStatementsToOne(); batchCommand.FinalCommandText = CommandText; - batchCommand.PositionalParameters = Parameters.InternalList; + if (parameters is not null) + { + batchCommand.PositionalParameters = parameters.InternalList; + batchCommand._parameters = parameters; + } } else { batchCommand.FinalCommandText = batchCommand.CommandText; - batchCommand.PositionalParameters = batchCommand.Parameters.InternalList; + if (parameters is not null) + batchCommand.PositionalParameters = parameters.InternalList; } ValidateParameterCount(batchCommand); break; case PlaceholderType.NoParameters: + if (batchCommand is not null) + { + batchCommand.FinalCommandText = batchCommand.CommandText; + break; + } // Unless the EnableSqlRewriting AppContext switch is explicitly disabled, queries with no parameters are parsed just // like queries with named parameters, since they may contain a semicolon (legacy batching). if (EnableSqlRewriting) @@ -861,35 +936,31 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr case PlaceholderType.Named: if (!EnableSqlRewriting) - throw new NotSupportedException($"Named parameters are not supported when Npgsql.{nameof(EnableSqlRewriting)} is disabled"); - - // The parser is cached on NpgsqlConnector - unless we're in multiplexing mode. 
- parser ??= new SqlQueryParser(); + ThrowHelper.ThrowNotSupportedException($"Named parameters are not supported when Npgsql.{nameof(EnableSqlRewriting)} is disabled"); if (batchCommand is null) { parser.ParseRawQuery(this, standardConformingStrings); - if (InternalBatchCommands.Count > 1 && _parameters.HasOutputParameters) - throw new NotSupportedException("Commands with multiple queries cannot have out parameters"); + if (InternalBatchCommands.Count > 1 && _parameters?.HasOutputParameters == true) + ThrowHelper.ThrowNotSupportedException("Commands with multiple queries cannot have out parameters"); for (var i = 0; i < InternalBatchCommands.Count; i++) ValidateParameterCount(InternalBatchCommands[i]); } else { parser.ParseRawQuery(batchCommand, standardConformingStrings); - if (batchCommand.Parameters.HasOutputParameters) - throw new NotSupportedException("Batches cannot cannot have out parameters"); ValidateParameterCount(batchCommand); } break; case PlaceholderType.Mixed: - throw new NotSupportedException("Mixing named and positional parameters isn't supported"); + ThrowHelper.ThrowNotSupportedException("Mixing named and positional parameters isn't supported"); + break; default: - throw new ArgumentOutOfRangeException( - nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {Parameters.PlaceholderType}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {{0}}", _parameters?.PlaceholderType ?? PlaceholderType.NoParameters); + break; } break; @@ -902,48 +973,54 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr case CommandType.StoredProcedure: var sqlBuilder = new StringBuilder() .Append(EnableStoredProcedureCompatMode ? 
"SELECT * FROM " : "CALL ") - .Append(CommandText) + .Append(commandText) .Append('('); var isFirstParam = true; var seenNamedParam = false; - var inputParameters = new List(parameters.Count); - - for (var i = 0; i < parameters.Count; i++) + var inputParameters = NpgsqlBatchCommand.EmptyParameters; + if (parameters is not null) { - var parameter = parameters[i]; + inputParameters = new List(parameters.Count); + for (var i = 0; i < parameters.Count; i++) + { + var parameter = parameters[i]; - // With functions, output parameters are never present when calling the function (they only define the schema of the - // returned table). With stored procedures they must be specified in the CALL argument list (see below). - if (EnableStoredProcedureCompatMode && parameter.Direction == ParameterDirection.Output) - continue; + // With functions, output parameters are never present when calling the function (they only define the schema of the + // returned table). With stored procedures they must be specified in the CALL argument list (see below). 
+ if (EnableStoredProcedureCompatMode && parameter.Direction == ParameterDirection.Output) + continue; - if (isFirstParam) - isFirstParam = false; - else - sqlBuilder.Append(", "); + if (parameter.Direction == ParameterDirection.ReturnValue) + continue; - if (parameter.IsPositional) - { - if (seenNamedParam) - throw new ArgumentException(NpgsqlStrings.PositionalParameterAfterNamed); - } - else - { - seenNamedParam = true; + if (isFirstParam) + isFirstParam = false; + else + sqlBuilder.Append(", "); - sqlBuilder - .Append('"') - .Append(parameter.TrimmedName.Replace("\"", "\"\"")) - .Append("\" := "); - } + if (parameter.IsPositional) + { + if (seenNamedParam) + ThrowHelper.ThrowArgumentException(NpgsqlStrings.PositionalParameterAfterNamed); + } + else + { + seenNamedParam = true; - if (parameter.Direction == ParameterDirection.Output) - sqlBuilder.Append("NULL"); - else - { - inputParameters.Add(parameter); - sqlBuilder.Append('$').Append(inputParameters.Count); + sqlBuilder + .Append('"') + .Append(parameter.TrimmedName.Replace("\"", "\"\"")) + .Append("\" := "); + } + + if (parameter.Direction == ParameterDirection.Output) + sqlBuilder.Append("NULL"); + else + { + inputParameters!.Add(parameter); + sqlBuilder.Append('$').Append(inputParameters.Count); + } } } @@ -951,20 +1028,21 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr batchCommand ??= TruncateStatementsToOne(); batchCommand.FinalCommandText = sqlBuilder.ToString(); + batchCommand._parameters = parameters; batchCommand.PositionalParameters.AddRange(inputParameters); ValidateParameterCount(batchCommand); break; default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {CommandType} of enum {nameof(CommandType)}. Please file a bug."); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(CommandType), $"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(CommandType)}. 
Please file a bug.", commandType); + break; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] static void ValidateParameterCount(NpgsqlBatchCommand batchCommand) { - if (batchCommand.PositionalParameters.Count > ushort.MaxValue) - throw new NpgsqlException($"A statement cannot have more than {ushort.MaxValue} parameters"); + if (batchCommand is { HasParameters: true, PositionalParameters.Count: > ushort.MaxValue }) + ThrowHelper.ThrowNpgsqlException("A statement cannot have more than 65535 parameters"); } } @@ -972,9 +1050,6 @@ static void ValidateParameterCount(NpgsqlBatchCommand batchCommand) #region Message Creation / Population - void BeginSend(NpgsqlConnector connector) - => connector.WriteBuffer.Timeout = TimeSpan.FromSeconds(CommandTimeout); - internal Task Write(NpgsqlConnector connector, bool async, bool flush, CancellationToken cancellationToken = default) { return (_behavior & CommandBehavior.SchemaOnly) == 0 @@ -985,10 +1060,12 @@ async Task WriteExecute(NpgsqlConnector connector, bool async, bool flush, Cance { NpgsqlBatchCommand? batchCommand = null; + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - // The following is only for deadlock avoidance when doing sync I/O (so never in multiplexing) - ForceAsyncIfNecessary(ref async, i); + // The following is only for deadlock avoidance when doing sync I/O + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); batchCommand = InternalBatchCommands[i]; var pStatement = batchCommand.PreparedStatement; @@ -1002,95 +1079,114 @@ async Task WriteExecute(NpgsqlConnector connector, bool async, bool flush, Cance // We may have a prepared statement that replaces an existing statement - close the latter first. 
if (pStatement?.StatementBeingReplaced != null) - await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken); + await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, batchCommand.PositionalParameters, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, + batchCommand.CurrentParametersReadOnly, async, cancellationToken).ConfigureAwait(false); await connector.WriteBind( - batchCommand.PositionalParameters, string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, + batchCommand.CurrentParametersReadOnly, + string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, i == 0 ? UnknownResultTypeList : null, - async, cancellationToken); + async, cancellationToken).ConfigureAwait(false); - await connector.WriteDescribe(StatementOrPortal.Portal, string.Empty, async, cancellationToken); + await connector.WriteDescribe(StatementOrPortal.Portal, [], async, cancellationToken).ConfigureAwait(false); } else { // The statement is already prepared, only a Bind is needed await connector.WriteBind( - batchCommand.PositionalParameters, string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, + batchCommand.CurrentParametersReadOnly, + string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, i == 0 ? UnknownResultTypeList : null, - async, cancellationToken); + async, cancellationToken).ConfigureAwait(false); } - await connector.WriteExecute(0, async, cancellationToken); + await connector.WriteExecute(0, async, cancellationToken).ConfigureAwait(false); if (batchCommand.AppendErrorBarrier ?? 
EnableErrorBarriers) - await connector.WriteSync(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); - if (pStatement != null) - pStatement.LastUsed = DateTime.UtcNow; + pStatement?.RefreshLastUsed(); } if (batchCommand is null || !(batchCommand.AppendErrorBarrier ?? EnableErrorBarriers)) { - await connector.WriteSync(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); } if (flush) - await connector.Flush(async, cancellationToken); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } async Task WriteExecuteSchemaOnly(NpgsqlConnector connector, bool async, bool flush, CancellationToken cancellationToken) { var wroteSomething = false; + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; + var pStatement = batchCommand.PreparedStatement; + + pStatement?.RefreshLastUsed(); + + Debug.Assert(batchCommand.FinalCommandText is not null); + + if (pStatement != null && !batchCommand.IsPreparing) + { + // Prepared, we already have the RowDescription + Debug.Assert(pStatement.IsPrepared); + continue; + } - if (batchCommand.PreparedStatement?.State == PreparedState.Prepared) - continue; // Prepared, we already have the RowDescription + // We may have a prepared statement that replaces an existing statement - close the latter first. 
+ if (pStatement?.StatementBeingReplaced != null) + await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText!, batchCommand.StatementName, batchCommand.PositionalParameters, async, cancellationToken); - await connector.WriteDescribe(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, + batchCommand.CurrentParametersReadOnly, + async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); wroteSomething = true; } if (wroteSomething) { - await connector.WriteSync(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); if (flush) - await connector.Flush(async, cancellationToken); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } } } async Task SendDeriveParameters(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { - BeginSend(connector); - + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; - await connector.WriteParse(batchCommand.FinalCommandText!, string.Empty, EmptyParameters, async, cancellationToken); - await connector.WriteDescribe(StatementOrPortal.Statement, string.Empty, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText!, [], NpgsqlBatchCommand.EmptyParameters, async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, [], async, 
cancellationToken).ConfigureAwait(false); } - await connector.WriteSync(async, cancellationToken); - await connector.Flush(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } async Task SendPrepare(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { - BeginSend(connector); - + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; var pStatement = batchCommand.PreparedStatement; @@ -1103,49 +1199,47 @@ async Task SendPrepare(NpgsqlConnector connector, bool async, CancellationToken // We may have a prepared statement that replaces an existing statement - close the latter first. var statementToClose = pStatement!.StatementBeingReplaced; if (statementToClose != null) - await connector.WriteClose(StatementOrPortal.Statement, statementToClose.Name!, async, cancellationToken); + await connector.WriteClose(StatementOrPortal.Statement, statementToClose.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText!, pStatement.Name!, batchCommand.PositionalParameters, async, cancellationToken); - await connector.WriteDescribe(StatementOrPortal.Statement, pStatement.Name!, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText!, pStatement.Name!, batchCommand.CurrentParametersReadOnly, async, + cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, pStatement.Name!, async, cancellationToken).ConfigureAwait(false); } - await connector.WriteSync(async, cancellationToken); - await connector.Flush(async, cancellationToken); + await connector.WriteSync(async, 
cancellationToken).ConfigureAwait(false); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void ForceAsyncIfNecessary(ref bool async, int numberOfStatementInBatch) + bool ShouldSchedule(ref bool async, int indexOfStatementInBatch) { - if (!async && numberOfStatementInBatch > 0) - { - // We're synchronously sending the non-first statement in a batch - switch to async writing. - // See long comment in Execute() above. - - // TODO: we can simply do all batch writing asynchronously, instead of starting with the 2nd statement. - // For now, writing the first statement synchronously gives us a better chance of handle and bubbling up errors correctly - // (see sendTask.IsFaulted in Execute()). Once #1323 is done, that shouldn't be needed any more and entire batches should - // be written asynchronously. - async = true; - SynchronizationContext.SetSynchronizationContext(SingleThreadSynchronizationContext); - } + if (indexOfStatementInBatch <= 0) + return false; + + // We're synchronously sending the non-first statement in a batch - switch to async writing. + // See long comment in Execute() above. + + // TODO: we can simply do all batch writing asynchronously, instead of starting with the 2nd statement. + // For now, writing the first statement synchronously gives us a better chance of handling and bubbling up errors correctly + // (see sendTask.IsFaulted in Execute()). Once #1323 is done, that shouldn't be needed any more and entire batches should + // be written asynchronously. 
+ async = true; + return TaskScheduler.Current != ConstrainedConcurrencyScheduler; } async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { - BeginSend(connector); - - var i = 0; - foreach (var batchCommand in InternalBatchCommands.Where(s => s.IsPrepared)) + foreach (var batchCommand in InternalBatchCommands) { - ForceAsyncIfNecessary(ref async, i); - - await connector.WriteClose(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken); + if (!batchCommand.IsPrepared) + continue; + // No need to force async here since each statement takes no more than 20 bytes + await connector.WriteClose(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); batchCommand.PreparedStatement!.State = PreparedState.BeingUnprepared; - i++; } - await connector.WriteSync(async, cancellationToken); - await connector.Flush(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } #endregion @@ -1166,25 +1260,22 @@ async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken ca /// /// A task representing the asynchronous operation, with the number of rows affected if known; -1 otherwise. public override Task ExecuteNonQueryAsync(CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteNonQuery(true, cancellationToken); - } + => ExecuteNonQuery(async: true, cancellationToken); [MethodImpl(MethodImplOptions.AggressiveInlining)] async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) { - var reader = await ExecuteReader(CommandBehavior.Default, async, cancellationToken); + var reader = await ExecuteReader(async, CommandBehavior.Default, cancellationToken).ConfigureAwait(false); try { - while (async ? 
await reader.NextResultAsync(cancellationToken) : reader.NextResult()) ; + while (async ? await reader.NextResultAsync(cancellationToken).ConfigureAwait(false) : reader.NextResult()) ; return reader.RecordsAffected; } finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -1211,31 +1302,25 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) /// A task representing the asynchronous operation, with the first column of the /// first row in the result set, or a null reference if the result set is empty. public override Task ExecuteScalarAsync(CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteScalar(true, cancellationToken).AsTask(); - } + => ExecuteScalar(async: true, cancellationToken).AsTask(); [MethodImpl(MethodImplOptions.AggressiveInlining)] async ValueTask ExecuteScalar(bool async, CancellationToken cancellationToken) { var behavior = CommandBehavior.SingleRow; - if (IsWrappedByBatch || !Parameters.HasOutputParameters) + if (WrappingBatch is not null || _parameters?.HasOutputParameters != true) behavior |= CommandBehavior.SequentialAccess; - var reader = await ExecuteReader(behavior, async, cancellationToken); + var reader = await ExecuteReader(async, behavior, cancellationToken).ConfigureAwait(false); try { - var read = async ? await reader.ReadAsync(cancellationToken) : reader.Read(); - var value = read && reader.FieldCount != 0 ? reader.GetValue(0) : null; - // We read the whole result set to trigger any errors - while (async ? await reader.NextResultAsync(cancellationToken) : reader.NextResult()) ; - return value; + var read = async ? await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read(); + return read && reader.FieldCount != 0 ? 
reader.GetValue(0) : null; } finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -1261,7 +1346,7 @@ protected override DbDataReader ExecuteDbDataReader(CommandBehavior behavior) /// /// A task representing the asynchronous operation. protected override async Task ExecuteDbDataReaderAsync(CommandBehavior behavior, CancellationToken cancellationToken) - => await ExecuteReaderAsync(behavior, cancellationToken); + => await ExecuteReaderAsync(behavior, cancellationToken).ConfigureAwait(false); /// /// Executes the against the @@ -1270,7 +1355,7 @@ protected override async Task ExecuteDbDataReaderAsync(CommandBeha /// One of the enumeration values that specifies the command behavior. /// A task representing the operation. public new NpgsqlDataReader ExecuteReader(CommandBehavior behavior = CommandBehavior.Default) - => ExecuteReader(behavior, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => ExecuteReader(async: false, behavior, CancellationToken.None).GetAwaiter().GetResult(); /// /// An asynchronous version of , which executes @@ -1295,222 +1380,182 @@ protected override async Task ExecuteDbDataReaderAsync(CommandBeha /// /// A task representing the asynchronous operation. public new Task ExecuteReaderAsync(CommandBehavior behavior, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteReader(behavior, async: true, cancellationToken).AsTask(); - } + => ExecuteReader(async: true, behavior, cancellationToken).AsTask(); - // TODO: Maybe pool these? 
- internal ManualResetValueTaskSource ExecutionCompletion { get; } - = new(); - - internal virtual async ValueTask ExecuteReader(CommandBehavior behavior, bool async, CancellationToken cancellationToken) + internal virtual async ValueTask ExecuteReader(bool async, CommandBehavior behavior, CancellationToken cancellationToken) { var conn = CheckAndGetConnection(); _behavior = behavior; - NpgsqlConnector? connector; + NpgsqlConnector connector; if (_connector is not null) { Debug.Assert(conn is null); if (behavior.HasFlag(CommandBehavior.CloseConnection)) - throw new ArgumentException($"{nameof(CommandBehavior.CloseConnection)} is not supported with {nameof(NpgsqlConnector)}", nameof(behavior)); + ThrowHelper.ThrowArgumentException($"{nameof(CommandBehavior.CloseConnection)} is not supported with {nameof(NpgsqlConnector)}", nameof(behavior)); connector = _connector; } else { Debug.Assert(conn is not null); - conn.TryGetBoundConnector(out connector); + connector = conn.Connector!; } try { - if (connector is not null) - { - var dataSource = connector.DataSource; - var logger = connector.CommandLogger; + var logger = connector.CommandLogger; + var reloadableState = connector.ReloadableState; - cancellationToken.ThrowIfCancellationRequested(); - // We cannot pass a token here, as we'll cancel a non-send query - // Also, we don't pass the cancellation token to StartUserAction, since that would make it scope to the entire action (command execution) - // whereas it should only be scoped to the Execute method. - connector.StartUserAction(ConnectorState.Executing, this, CancellationToken.None); + cancellationToken.ThrowIfCancellationRequested(); + // We cannot pass a token here, as we'll cancel a non-send query + // Also, we don't pass the cancellation token to StartUserAction, since that would make it scope to the entire action (command execution) + // whereas it should only be scoped to the Execute method. 
+ connector.StartUserAction(ConnectorState.Executing, this, CancellationToken.None); - Task? sendTask; + Task? sendTask; - var validateParameterValues = !behavior.HasFlag(CommandBehavior.SchemaOnly); + var validateParameterValues = !behavior.HasFlag(CommandBehavior.SchemaOnly); + long startTimestamp; - try + try + { + var fullyPrepared = false; + + switch (IsExplicitlyPrepared) { - switch (IsExplicitlyPrepared) + case true: + Debug.Assert(_connectorPreparedOn != null); + if (WrappingBatch is not null) + { + foreach (var batchCommand in InternalBatchCommands) + { + if (batchCommand.ConnectorPreparedOn != connector) + { + ResetPreparation(); + goto case false; + } + + batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); + } + } + else { - case true: - Debug.Assert(_connectorPreparedOn != null); if (_connectorPreparedOn != connector) { // The command was prepared, but since then the connector has changed. Detach all prepared statements. 
- foreach (var s in InternalBatchCommands) - s.PreparedStatement = null; ResetPreparation(); goto case false; } + _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); + } - if (IsWrappedByBatch) - foreach (var batchCommand in InternalBatchCommands) - batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); - else - Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); - - NpgsqlEventSource.Log.CommandStartPrepared(); - break; + NpgsqlEventSource.Log.CommandStartPrepared(); + connector.DataSource.MetricsReporter.CommandStartPrepared(); + fullyPrepared = true; + break; - case false: - var numPrepared = 0; + case false: + var numPrepared = 0; - if (IsWrappedByBatch) + if (WrappingBatch is not null) + { + for (var i = 0; i < InternalBatchCommands.Count; i++) { - for (var i = 0; i < InternalBatchCommands.Count; i++) - { - var batchCommand = InternalBatchCommands[i]; + var batchCommand = InternalBatchCommands[i]; - batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); - ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); + batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); + ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); - if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) - numPrepared++; + if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) + { + batchCommand.ConnectorPreparedOn = connector; + numPrepared++; } } - else - { - Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); - ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); - - if (connector.Settings.MaxAutoPrepare > 0) - for (var i = 0; i < 
InternalBatchCommands.Count; i++) - if (InternalBatchCommands[i].TryAutoPrepare(connector)) - numPrepared++; - } - - if (numPrepared > 0) - { - _connectorPreparedOn = connector; - if (numPrepared == InternalBatchCommands.Count) - NpgsqlEventSource.Log.CommandStartPrepared(); - } - - break; } - - State = CommandState.InProgress; - - if (logger.IsEnabled(LogLevel.Information)) + else { - connector.QueryLogStopWatch.Restart(); + _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); + ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); - if (logger.IsEnabled(LogLevel.Debug)) - LogExecutingCompleted(connector, executing: true); + if (connector.Settings.MaxAutoPrepare > 0) + for (var i = 0; i < InternalBatchCommands.Count; i++) + if (InternalBatchCommands[i].TryAutoPrepare(connector)) + numPrepared++; } - NpgsqlEventSource.Log.CommandStart(CommandText); - TraceCommandStart(connector); - - // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) - lock (connector.CancelLock) + if (numPrepared > 0) { + _connectorPreparedOn = connector; + if (numPrepared == InternalBatchCommands.Count) + { + NpgsqlEventSource.Log.CommandStartPrepared(); + connector.DataSource.MetricsReporter.CommandStartPrepared(); + fullyPrepared = true; + } } - // We do not wait for the entire send to complete before proceeding to reading - - // the sending continues in parallel with the user's reading. Waiting for the - // entire send to complete would trigger a deadlock for multi-statement commands, - // where PostgreSQL sends large results for the first statement, while we're sending large - // parameter data for the second. See #641. - // Instead, all sends for non-first statements are performed asynchronously (even if the user requested sync), - // in a special synchronization context to prevents a dependency on the thread pool (which would also trigger - // deadlocks). 
- BeginSend(connector); - sendTask = Write(connector, async, flush: true, CancellationToken.None); - - // The following is a hack. It raises an exception if one was thrown in the first phases - // of the send (i.e. in parts of the send that executed synchronously). Exceptions may - // still happen later and aren't properly handled. See #1323. - if (sendTask.IsFaulted) - sendTask.GetAwaiter().GetResult(); - } - catch - { - connector.EndUserAction(); - throw; + break; } - // TODO: DRY the following with multiplexing, but be careful with the cancellation registration... - var reader = connector.DataReader; - reader.Init(this, behavior, InternalBatchCommands, sendTask); - connector.CurrentReader = reader; - if (async) - await reader.NextResultAsync(cancellationToken); - else - reader.NextResult(); - - TraceReceivedFirstResponse(); + // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) + // We do it before changing the state because we only allow sending cancellation request if State == InProgress + connector.ResetCancellation(); - return reader; - } - else - { - Debug.Assert(conn is not null); - Debug.Assert(conn.Settings.Multiplexing); - - // The connection isn't bound to a connector - it's multiplexing time. - var dataSource = (MultiplexingDataSource)conn.NpgsqlDataSource; + State = CommandState.InProgress; - if (!async) + if (logger.IsEnabled(LogLevel.Information)) { - // The waiting on the ExecutionCompletion ManualResetValueTaskSource is necessarily - // asynchronous, so allowing sync would mean sync-over-async. 
- throw new NotSupportedException( - "Synchronous command execution is not supported when multiplexing is on"); - } + connector.QueryLogStopWatch.Restart(); - if (IsWrappedByBatch) - { - foreach (var batchCommand in InternalBatchCommands) - { - batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateValues: true, CommandType); - ProcessRawQuery(null, standardConformingStrings: true, batchCommand); - } - } - else - { - Parameters.ProcessParameters(dataSource.TypeMapper, validateValues: true, CommandType); - ProcessRawQuery(null, standardConformingStrings: true, batchCommand: null); + if (logger.IsEnabled(LogLevel.Debug)) + LogExecutingCompleted(connector, executing: true); } - State = CommandState.InProgress; - - // TODO: Experiment: do we want to wait on *writing* here, or on *reading*? - // Previous behavior was to wait on reading, which throw the exception from ExecuteReader (and not from - // the first read). But waiting on writing would allow us to do sync writing and async reading. - ExecutionCompletion.Reset(); - await dataSource.MultiplexCommandWriter.WriteAsync(this, cancellationToken); - connector = await new ValueTask(ExecutionCompletion, ExecutionCompletion.Version); - // TODO: Overload of StartBindingScope? - conn.Connector = connector; - connector.Connection = conn; - conn.ConnectorBindingScope = ConnectorBindingScope.Reader; - - var reader = connector.DataReader; - reader.Init(this, behavior, InternalBatchCommands); - connector.CurrentReader = reader; - await reader.NextResultAsync(cancellationToken); - - return reader; + NpgsqlEventSource.Log.CommandStart(CommandText); + startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); + TraceCommandStart(connector.DataSource.Configuration.TracingOptions, fullyPrepared); + TraceCommandEnrich(connector); + + // We do not wait for the entire send to complete before proceeding to reading - + // the sending continues in parallel with the user's reading. 
Waiting for the + // entire send to complete would trigger a deadlock for multi-statement commands, + // where PostgreSQL sends large results for the first statement, while we're sending large + // parameter data for the second. See #641. + // Instead, all sends for non-first statements are performed asynchronously (even if the user requested sync), + // in a special synchronization context to prevents a dependency on the thread pool (which would also trigger + // deadlocks). + sendTask = Write(connector, async, flush: true, CancellationToken.None); + + // The following is a hack. It raises an exception if one was thrown in the first phases + // of the send (i.e. in parts of the send that executed synchronously). Exceptions may + // still happen later and aren't properly handled. See #1323. + if (sendTask.IsFaulted) + sendTask.GetAwaiter().GetResult(); + } + catch + { + connector.EndUserAction(); + throw; } + + var reader = connector.DataReader; + reader.Init(this, behavior, InternalBatchCommands, startTimestamp, sendTask); + connector.CurrentReader = reader; + if (async) + await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); + else + reader.NextResult(); + + TraceReceivedFirstResponse(connector.DataSource.Configuration.TracingOptions); + + return reader; } catch (Exception e) { - var reader = connector?.CurrentReader; + var reader = connector.CurrentReader; if (e is not NpgsqlOperationInProgressException && reader is not null) - await reader.Cleanup(async); + await reader.Cleanup(async).ConfigureAwait(false); TraceSetException(e); @@ -1522,7 +1567,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior if ((behavior & CommandBehavior.CloseConnection) == CommandBehavior.CloseConnection) { Debug.Assert(_connector is null && conn is not null); - conn.Close(); + await conn.Close(async).ConfigureAwait(false); } throw; @@ -1539,7 +1584,13 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior protected override DbTransaction? 
DbTransaction { get => _transaction; - set => _transaction = (NpgsqlTransaction?)value; + set + { + var tx = (NpgsqlTransaction?)value; + if (tx is { IsCompleted: true }) + throw new InvalidOperationException("Transaction is already completed"); + _transaction = tx; + } } /// @@ -1570,7 +1621,7 @@ public override void Cancel() if (connector is null) return; - connector.PerformUserCancellation(); + connector.PerformImmediateUserCancellation(); } #endregion Cancel @@ -1580,51 +1631,89 @@ public override void Cancel() /// protected override void Dispose(bool disposing) { - _transaction = null; + ResetTransaction(); State = CommandState.Disposed; - if (IsCached && InternalConnection is not null && InternalConnection.CachedCommand is null) + if (IsCacheable && InternalConnection is not null && InternalConnection.CachedCommand is null) { - // TODO: Optimize NpgsqlParameterCollection to recycle NpgsqlParameter instances as well - // TODO: Statements isn't cleared/recycled, leaving this for now, since it'll be replaced by the new batching API - - _commandText = string.Empty; - CommandType = CommandType.Text; - _parameters.Clear(); + Reset(); InternalConnection.CachedCommand = this; return; } - IsCached = false; + IsCacheable = false; } + internal void Reset() + { + // TODO: Optimize NpgsqlParameterCollection to recycle NpgsqlParameter instances as well + // TODO: Statements isn't cleared/recycled, leaving this for now, since it'll be replaced by the new batching API + _commandText = string.Empty; + CommandType = CommandType.Text; + // Can be null if it's owned by batch + _parameters?.Clear(); + _timeout = null; + AllResultTypesAreUnknown = false; + Debug.Assert(_unknownResultTypeList is null); + EnableErrorBarriers = false; + } + + internal void ResetTransaction() => _transaction = null; + #endregion #region Tracing - #endregion Tracing - - internal void TraceCommandStart(NpgsqlConnector connector) + internal void TraceCommandStart(NpgsqlTracingOptions tracingOptions, 
bool? prepared) { Debug.Assert(CurrentActivity is null); + if (NpgsqlActivitySource.IsEnabled) - CurrentActivity = NpgsqlActivitySource.CommandStart(connector, CommandText); + { + var enableTracing = WrappingBatch is not null + ? tracingOptions.BatchFilter?.Invoke(WrappingBatch) ?? true + : tracingOptions.CommandFilter?.Invoke(this) ?? true; + + if (enableTracing) + { + var spanName = WrappingBatch is not null + ? tracingOptions.BatchSpanNameProvider?.Invoke(WrappingBatch) + : tracingOptions.CommandSpanNameProvider?.Invoke(this); + + CurrentActivity = NpgsqlActivitySource.CommandStart( + WrappingBatch is not null ? GetBatchFullCommandText() : CommandText, + CommandType, + prepared, + spanName); + } + } } - internal void TraceReceivedFirstResponse() + internal void TraceCommandEnrich(NpgsqlConnector connector) { if (CurrentActivity is not null) { - NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity); + NpgsqlActivitySource.Enrich(CurrentActivity, connector); + var tracingOptions = connector.DataSource.Configuration.TracingOptions; + if (WrappingBatch is not null) + tracingOptions.BatchEnrichmentCallback?.Invoke(CurrentActivity, WrappingBatch); + else + tracingOptions.CommandEnrichmentCallback?.Invoke(CurrentActivity, this); } } + internal void TraceReceivedFirstResponse(NpgsqlTracingOptions tracingOptions) + { + if (CurrentActivity is not null) + NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity, tracingOptions); + } + internal void TraceCommandStop() { if (CurrentActivity is not null) { - NpgsqlActivitySource.CommandStop(CurrentActivity); + CurrentActivity.Dispose(); CurrentActivity = null; } } @@ -1638,6 +1727,8 @@ internal void TraceSetException(Exception e) } } + #endregion Tracing + #region Misc NpgsqlBatchCommand TruncateStatementsToOne() @@ -1677,10 +1768,9 @@ internal void FixupRowDescription(RowDescriptionMessage rowDescription, bool isF for (var i = 0; i < rowDescription.Count; i++) { var field = rowDescription[i]; - field.FormatCode = 
(UnknownResultTypeList == null || !isFirst ? AllResultTypesAreUnknown : UnknownResultTypeList[i]) - ? FormatCode.Text - : FormatCode.Binary; - field.ResolveHandler(); + field.DataFormat = (UnknownResultTypeList == null || !isFirst ? AllResultTypesAreUnknown : UnknownResultTypeList[i]) + ? DataFormat.Text + : DataFormat.Binary; } } @@ -1688,19 +1778,20 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { var logParameters = connector.LoggingConfiguration.IsParameterLoggingEnabled || connector.Settings.LogParameters; var logger = connector.LoggingConfiguration.CommandLogger; + Debug.Assert(executing ? logger.IsEnabled(LogLevel.Debug) : logger.IsEnabled(LogLevel.Information)); if (InternalBatchCommands.Count == 1) { var singleCommand = InternalBatchCommands[0]; - if (logParameters && singleCommand.PositionalParameters.Count > 0) + if (logParameters && singleCommand.HasParameters) { if (executing) { LogMessages.ExecutingCommandWithParameters( logger, singleCommand.FinalCommandText!, - singleCommand.PositionalParameters.Select(p => p.Value == DBNull.Value ? "NULL" : p.Value!).ToArray(), + GetParametersForLogging(singleCommand), connector.Id); } else @@ -1708,7 +1799,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) LogMessages.CommandExecutionCompletedWithParameters( logger, singleCommand.FinalCommandText!, - singleCommand.PositionalParameters.Select(p => p.Value == DBNull.Value ? "NULL" : p.Value!).ToArray(), + GetParametersForLogging(singleCommand), connector.QueryLogStopWatch.ElapsedMilliseconds, connector.Id); } @@ -1725,11 +1816,9 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { if (logParameters) { - var commands = InternalBatchCommands - .Select(c => ( - c.CommandText, - Parameters: (object[]?)c.PositionalParameters.Select(p => p.Value == DBNull.Value ? "NULL" : p.Value).ToArray()!) 
- ).ToArray(); + var commands = new (string, IEnumerable)[InternalBatchCommands.Count]; + for (var i = 0; i < InternalBatchCommands.Count; i++) + commands[i] = (InternalBatchCommands[i].FinalCommandText!, new LoggingEnumerable(GetParametersForLogging(InternalBatchCommands[i]))); if (executing) LogMessages.ExecutingBatchWithParameters(logger, commands, connector.Id); @@ -1738,15 +1827,63 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) } else { - var commands = InternalBatchCommands.Select(c => c.CommandText).ToArray().ToArray(); - + var commands = new string[InternalBatchCommands.Count]; + for (var i = 0; i < InternalBatchCommands.Count; i++) + commands[i] = InternalBatchCommands[i].FinalCommandText!; if (executing) LogMessages.ExecutingBatch(logger, commands, connector.Id); else LogMessages.BatchExecutionCompleted(logger, commands, connector.QueryLogStopWatch.ElapsedMilliseconds, connector.Id); } } - } + + static object[] GetParametersForLogging(NpgsqlBatchCommand c) + { + var positionalParameters = c.CurrentParametersReadOnly; + var parameters = new object[positionalParameters.Count]; + for (var i = 0; i < positionalParameters.Count; i++) + { + parameters[i] = GetParameterForLogging(positionalParameters[i].Value); + } + return parameters; + + object GetParameterForLogging(object? value) + { + return value switch + { + DBNull or null => "NULL", + IEnumerable enumerable and not string => GetEnumerableForLogging(enumerable), + _ => value + }; + + string GetEnumerableForLogging(IEnumerable enumerable) + { + var vsb = new StringBuilder(256); + var count = 0; + vsb.Append('['); + foreach (var e in enumerable) + { + if (count > 9) + { + vsb.Append(", ..."); + break; + } + + if (count > 0) + { + vsb.Append(", "); + } + + vsb.Append(GetParameterForLogging(e)); + count++; + } + + vsb.Append(']'); + return vsb.ToString(); + } + } + } + } /// /// Create a new command based on this one. 
@@ -1762,33 +1899,32 @@ public virtual NpgsqlCommand Clone() { var clone = new NpgsqlCommand(CommandText, InternalConnection, Transaction) { - CommandTimeout = CommandTimeout, CommandType = CommandType, DesignTimeVisible = DesignTimeVisible, _allResultTypesAreUnknown = _allResultTypesAreUnknown, _unknownResultTypeList = _unknownResultTypeList, ObjectResultTypes = ObjectResultTypes + CommandTimeout = CommandTimeout, + CommandType = CommandType, + DesignTimeVisible = DesignTimeVisible, + _allResultTypesAreUnknown = _allResultTypesAreUnknown, + _unknownResultTypeList = _unknownResultTypeList }; - _parameters.CloneTo(clone._parameters); + _parameters?.CloneTo(clone.Parameters); return clone; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] NpgsqlConnection? CheckAndGetConnection() { - if (State == CommandState.Disposed) - throw new ObjectDisposedException(GetType().FullName); - if (InternalConnection == null) + ObjectDisposedException.ThrowIf(State is CommandState.Disposed, this); + + var conn = InternalConnection; + if (conn is null) { if (_connector is null) - throw new InvalidOperationException("Connection property has not been initialized."); + ThrowHelper.ThrowInvalidOperationException("Connection property has not been initialized."); return null; } - switch (InternalConnection.FullState) - { - case ConnectionState.Open: - case ConnectionState.Connecting: - case ConnectionState.Open | ConnectionState.Executing: - case ConnectionState.Open | ConnectionState.Fetching: - return InternalConnection; - default: - throw new InvalidOperationException("Connection is not open"); - } + + if (!conn.FullState.HasFlag(ConnectionState.Open)) + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + + return conn; } /// diff --git a/src/Npgsql/NpgsqlCommandBuilder.cs b/src/Npgsql/NpgsqlCommandBuilder.cs index 9665b8356c..878e194d8e 100644 --- a/src/Npgsql/NpgsqlCommandBuilder.cs +++ b/src/Npgsql/NpgsqlCommandBuilder.cs @@ -1,4 +1,4 @@ -using System; +using 
System; using System.Data; using System.Data.Common; using System.Diagnostics.CodeAnalysis; @@ -212,7 +212,11 @@ private static void SetParameterValuesFromRow(NpgsqlCommand command, DataRow row protected override void ApplyParameterInfo(DbParameter p, DataRow row, System.Data.StatementType statementType, bool whereClause) { var param = (NpgsqlParameter)p; - param.NpgsqlDbType = (NpgsqlDbType)row[SchemaTableColumn.ProviderType]; + // DbCommandBuilder is going to set DbType.Int32 onto an existing parameter, reset other db type fields. + if (param.SourceColumnNullMapping) + param.ResetDbType(); + else + param.NpgsqlDbType = (NpgsqlDbType)row[SchemaTableColumn.ProviderType]; } /// diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index a14b07ee7e..d4334582e0 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -5,7 +5,6 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.IO; using System.Net.Security; using System.Net.Sockets; using System.Runtime.CompilerServices; @@ -15,10 +14,8 @@ using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.NameTranslation; using Npgsql.TypeMapping; using Npgsql.Util; -using NpgsqlTypes; using IsolationLevel = System.Data.IsolationLevel; namespace Npgsql; @@ -48,8 +45,7 @@ public sealed class NpgsqlConnection : DbConnection, ICloneable, IComponent ConnectionState _fullState; /// - /// The physical connection to the database. This is when the connection is closed, - /// and also when it is open in multiplexing mode and unbound (e.g. not in a transaction). + /// The physical connection to the database. This is when the connection is closed. /// internal NpgsqlConnector? Connector { get; set; } @@ -71,11 +67,6 @@ internal NpgsqlDataSource NpgsqlDataSource } } - /// - /// A cached command handed out by , which is returned when disposed. Useful for reducing allocations. 
- /// - internal NpgsqlCommand? CachedCommand { get; set; } - /// /// Flag used to make sure we never double-close a connection, returning it twice to the pool. /// @@ -97,6 +88,8 @@ internal NpgsqlDataSource NpgsqlDataSource public INpgsqlTypeMapper TypeMapper => throw new NotSupportedException(); + static Func? _cloningInstantiator; + /// /// The default TCP/IP port for PostgreSQL. /// @@ -107,12 +100,6 @@ public INpgsqlTypeMapper TypeMapper /// internal const int TimeoutLimit = 1024; - /// - /// Tracks when this connection was bound to a physical connector (e.g. at open-time, when a transaction - /// was started...). - /// - internal ConnectorBindingScope ConnectorBindingScope { get; set; } - ILogger _connectionLogger = default!; // Initialized in Open, shouldn't be used otherwise static readonly StateChangeEventArgs ClosedToOpenEventArgs = new(ConnectionState.Closed, ConnectionState.Open); @@ -132,6 +119,7 @@ public NpgsqlConnection() /// Initializes a new instance of with the given connection string. /// /// The connection used to open the PostgreSQL database. + public NpgsqlConnection(string? connectionString) : this() => ConnectionString = connectionString; @@ -143,7 +131,6 @@ internal NpgsqlConnection(NpgsqlDataSource dataSource, NpgsqlConnector connector Connector = connector; connector.Connection = this; - ConnectorBindingScope = ConnectorBindingScope.Connection; FullState = ConnectionState.Open; } @@ -170,11 +157,7 @@ internal static NpgsqlConnection FromDataSource(NpgsqlDataSource dataSource) /// An optional token to cancel the asynchronous operation. The default value is . /// /// A task representing the asynchronous operation. 
- public override Task OpenAsync(CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return Open(true, cancellationToken); - } + public override Task OpenAsync(CancellationToken cancellationToken) => Open(async: true, cancellationToken); void SetupDataSource() { @@ -224,19 +207,11 @@ void SetupDataSource() dataSourceBuilder.EnableParameterLogging(NpgsqlLoggingConfiguration.GlobalIsParameterLoggingEnabled); var newDataSource = dataSourceBuilder.Build(); + // See Clone() on the following line: + _cloningInstantiator = s => new NpgsqlConnection(s); + _dataSource = PoolManager.Pools.GetOrAdd(canonical, newDataSource); - if (_dataSource == newDataSource) - { - Debug.Assert(_dataSource is not MultiHostDataSourceWrapper); - // If the pool we created was the one that ended up being stored we need to increment the appropriate counter. - // Avoids a race condition where multiple threads will create a pool but only one will be stored. - if (_dataSource is NpgsqlMultiHostDataSource multiHostConnectorPool) - foreach (var hostPool in multiHostConnectorPool.Pools) - NpgsqlEventSource.Log.DataSourceCreated(hostPool); - else - NpgsqlEventSource.Log.DataSourceCreated(newDataSource); - } - else + if (_dataSource != newDataSource) newDataSource.Dispose(); // If this is a multi-host data source and the user specified a TargetSessionAttributes, create a wrapper in front of the @@ -255,42 +230,19 @@ internal Task Open(bool async, CancellationToken cancellationToken) if (_dataSource is null) { Debug.Assert(string.IsNullOrEmpty(_connectionString)); - - throw new InvalidOperationException("The ConnectionString property has not been initialized."); + ThrowHelper.ThrowInvalidOperationException("The ConnectionString property has not been initialized."); } - FullState = ConnectionState.Connecting; _userFacingConnectionString = _dataSource.ConnectionString; _connectionLogger = _dataSource.LoggingConfiguration.ConnectionLogger; - 
LogMessages.OpeningConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - - if (Settings.Multiplexing) - { - if (Settings.Enlist && Transaction.Current != null) - { - // TODO: Keep in mind that the TransactionScope can be disposed - throw new NotSupportedException(); - } - - // We're opening in multiplexing mode, without a transaction. We don't actually do anything. - - // If we've never connected with this connection string, open a physical connector in order to generate - // any exception (bad user/password, IP address...). This reproduces the standard error behavior. - if (!((MultiplexingDataSource)_dataSource).StartupCheckPerformed) - return PerformMultiplexingStartupCheck(async, cancellationToken); - - LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - FullState = ConnectionState.Open; - - return Task.CompletedTask; - } + if (_connectionLogger.IsEnabled(LogLevel.Trace)) + LogMessages.OpeningConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); return OpenAsync(async, cancellationToken); async Task OpenAsync(bool async, CancellationToken cancellationToken) { - Debug.Assert(!Settings.Multiplexing); - + FullState = ConnectionState.Connecting; NpgsqlConnector? connector = null; try { @@ -309,12 +261,11 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) enlistToTransaction = null; } else - connector = await _dataSource.Get(this, timeout, async, cancellationToken); + connector = await _dataSource.Get(this, timeout, async, cancellationToken).ConfigureAwait(false); Debug.Assert(connector.Connection is null, $"Connection for opened connector '{Connector?.Id.ToString() ?? 
"???"}' is bound to another connection"); - ConnectorBindingScope = ConnectorBindingScope.Connection; connector.Connection = this; Connector = connector; @@ -327,7 +278,6 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) catch { FullState = ConnectionState.Closed; - ConnectorBindingScope = ConnectorBindingScope.None; Connector = null; EnlistedTransaction = null; @@ -341,26 +291,6 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) } } - async Task PerformMultiplexingStartupCheck(bool async, CancellationToken cancellationToken) - { - try - { - var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); - - _ = await StartBindingScope(ConnectorBindingScope.Connection, timeout, async, cancellationToken); - EndBindingScope(ConnectorBindingScope.Connection); - - LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - ((MultiplexingDataSource)NpgsqlDataSource).StartupCheckPerformed = true; - - FullState = ConnectionState.Open; - } - catch - { - FullState = ConnectionState.Closed; - throw; - } - } } #endregion Open / Init @@ -434,7 +364,7 @@ public override string ConnectionString /// Gets the time (in seconds) to wait while trying to execute a command /// before terminating the attempt and generating an error. /// - /// The time (in seconds) to wait for a command to complete. The default value is 20 seconds. + /// The time (in seconds) to wait for a command to complete. The default value is 30 seconds. public int CommandTimeout => Settings.CommandTimeout; /// @@ -451,24 +381,13 @@ public override string ConnectionString /// The name of the database server (host and port). If the connection uses a Unix-domain socket, /// the path to that socket is returned. The default value is the empty string. /// - public override string DataSource => Connector?.Settings.DataSourceCached ?? 
string.Empty; - - /// - /// Whether to use Windows integrated security to log in. - /// - public bool IntegratedSecurity => Settings.IntegratedSecurity; + public override string DataSource => Connector?.Settings.DataSourceCached ?? _dataSource?.Settings.DataSourceCached ?? string.Empty; /// /// User name. /// public string? UserName => Settings.Username; - // The following two lines are here for backwards compatibility with the EF6 provider - // ReSharper disable UnusedMember.Global - internal string? EntityTemplateDatabase => Settings.EntityTemplateDatabase; - internal string? EntityAdminDatabase => Settings.EntityAdminDatabase; - // ReSharper restore UnusedMember.Global - #endregion Configuration settings #region State management @@ -481,31 +400,45 @@ public override string ConnectionString public ConnectionState FullState { // Note: we allow accessing the state after dispose, #164 - get => _fullState switch + get { - ConnectionState.Open => Connector == null - ? ConnectionState.Open // When unbound, we only know we're open - : Connector.State switch - { - ConnectorState.Ready => ConnectionState.Open, - ConnectorState.Executing => ConnectionState.Open | ConnectionState.Executing, - ConnectorState.Fetching => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Copy => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Replication => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Waiting => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Connecting => ConnectionState.Connecting, - ConnectorState.Broken => ConnectionState.Broken, - ConnectorState.Closed => throw new InvalidOperationException("Internal Npgsql bug: connection is in state Open but connector is in state Closed"), - _ => throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {Connector.State} of enum {nameof(ConnectorState)}. 
Please file a bug.") - }, - _ => _fullState - }; + if (_fullState != ConnectionState.Open) + return _fullState; + + if (Connector is null) + return ConnectionState.Open; // When unbound, we only know we're open + + switch (Connector.State) + { + case ConnectorState.Ready: + return ConnectionState.Open; + case ConnectorState.Executing: + return ConnectionState.Open | ConnectionState.Executing; + case ConnectorState.Fetching: + case ConnectorState.Copy: + case ConnectorState.Replication: + case ConnectorState.Waiting: + return ConnectionState.Open | ConnectionState.Fetching; + case ConnectorState.Connecting: + return ConnectionState.Connecting; + case ConnectorState.Broken: + return ConnectionState.Broken; + case ConnectorState.Closed: + ThrowHelper.ThrowInvalidOperationException("Internal Npgsql bug: connection is in state Open but connector is in state Closed"); + return ConnectionState.Broken; + default: + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(ConnectorState)}. Please file a bug.", Connector.State); + return ConnectionState.Broken; + } + } internal set { + if (value is < 0 or > ConnectionState.Broken) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Unknown connection state", value); + var originalOpen = _fullState.HasFlag(ConnectionState.Open); _fullState = value; - var currentOpen = _fullState.HasFlag(ConnectionState.Open); if (currentOpen != originalOpen) { @@ -540,6 +473,11 @@ public override ConnectionState State #region Command / Batch creation + /// + /// A cached command handed out by , which is returned when disposed. Useful for reducing allocations. + /// + internal NpgsqlCommand? CachedCommand { get; set; } + /// /// Creates and returns a /// object associated with the . 
@@ -566,7 +504,11 @@ public override ConnectionState State return NpgsqlCommand.CreateCachedCommand(this); } -#if NET6_0_OR_GREATER + /// + /// A cached batch handed out by , which is returned when disposed. Useful for reducing allocations. + /// + internal NpgsqlBatch? CachedBatch { get; set; } + /// public override bool CanCreateBatch => true; @@ -574,14 +516,19 @@ public override ConnectionState State protected override DbBatch CreateDbBatch() => CreateBatch(); /// - public new NpgsqlBatch CreateBatch() => new(this); -#else - /// - /// Creates and returns a object associated with the . - /// - /// A object. - public NpgsqlBatch CreateBatch() => new(this); -#endif + public new NpgsqlBatch CreateBatch() + { + CheckDisposed(); + + var cachedBatch = CachedBatch; + if (cachedBatch is not null) + { + CachedBatch = null; + return cachedBatch; + } + + return NpgsqlBatch.CreateCachedBatch(this); + } #endregion Command / Batch creation @@ -613,42 +560,31 @@ public override ConnectionState State /// A object representing the new transaction. /// Nested transactions are not supported. 
public new NpgsqlTransaction BeginTransaction(IsolationLevel level) - => BeginTransaction(level, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginTransaction(async: false, level, CancellationToken.None).GetAwaiter().GetResult(); - async ValueTask BeginTransaction(IsolationLevel level, bool async, CancellationToken cancellationToken) + async ValueTask BeginTransaction(bool async, IsolationLevel level, CancellationToken cancellationToken) { if (level == IsolationLevel.Chaos) - throw new NotSupportedException("Unsupported IsolationLevel: " + level); + ThrowHelper.ThrowNotSupportedException($"Unsupported IsolationLevel: {nameof(IsolationLevel.Chaos)}"); CheckReady(); - if (Connector is { InTransaction: true }) - throw new InvalidOperationException("A transaction is already in progress; nested/concurrent transactions aren't supported."); + var connector = Connector; + if (connector is { InTransaction: true }) + ThrowHelper.ThrowInvalidOperationException("A transaction is already in progress; nested/concurrent transactions aren't supported."); // There was a committed/rolled back transaction, but it was not disposed - var connector = ConnectorBindingScope == ConnectorBindingScope.Transaction - ? Connector - : await StartBindingScope(ConnectorBindingScope.Transaction, NpgsqlTimeout.Infinite, async, cancellationToken); Debug.Assert(connector != null); - try - { - // Note that beginning a transaction doesn't actually send anything to the backend (only prepends). - // But we start a user action to check the cancellation token and generate exceptions - using var _ = connector.StartUserAction(cancellationToken); + // Note that beginning a transaction doesn't actually send anything to the backend (only prepends). 
+ // But we start a user action to check the cancellation token and generate exceptions + using var _ = connector.StartUserAction(cancellationToken); - connector.Transaction ??= new NpgsqlTransaction(connector); - connector.Transaction.Init(level); - return connector.Transaction; - } - catch - { - EndBindingScope(ConnectorBindingScope.Transaction); - throw; - } + connector.Transaction ??= new NpgsqlTransaction(connector); + connector.Transaction.Init(level); + return connector.Transaction; } -#if !NETSTANDARD2_0 /// /// Asynchronously begins a database transaction. /// @@ -661,7 +597,7 @@ async ValueTask BeginTransaction(IsolationLevel level, bool a /// Nested transactions are not supported. /// protected override async ValueTask BeginDbTransactionAsync(IsolationLevel isolationLevel, CancellationToken cancellationToken) - => await BeginTransactionAsync(isolationLevel, cancellationToken); + => await BeginTransactionAsync(isolationLevel, cancellationToken).ConfigureAwait(false); /// /// Asynchronously begins a database transaction. @@ -689,20 +625,13 @@ protected override async ValueTask BeginDbTransactionAsync(Isolat /// Nested transactions are not supported. /// public new ValueTask BeginTransactionAsync(IsolationLevel level, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginTransaction(level, async: true, cancellationToken); - } -#endif + => BeginTransaction(async: true, level, cancellationToken); /// /// Enlist transaction. /// public override void EnlistTransaction(Transaction? transaction) { - if (Settings.Multiplexing) - throw new NotSupportedException("Ambient transactions aren't yet implemented for multiplexing"); - if (EnlistedTransaction != null) { if (EnlistedTransaction.Equals(transaction)) @@ -721,14 +650,11 @@ public override void EnlistTransaction(Transaction? 
transaction) } CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Transaction); + var connector = Connector!; EnlistedTransaction = transaction; if (transaction == null) - { - EndBindingScope(ConnectorBindingScope.Transaction); return; - } // Until #1378 is implemented, we have no recovery, and so no need to enlist as a durable resource manager // (or as promotable single phase). @@ -742,7 +668,7 @@ public override void EnlistTransaction(Transaction? transaction) EnlistedTransaction = transaction; LogMessages.EnlistedVolatileResourceManager( - Connector!.LoggingConfiguration.TransactionLogger, + connector.LoggingConfiguration.TransactionLogger, transaction.TransactionInformation.LocalIdentifier, connector.Id); } @@ -761,15 +687,8 @@ public override void EnlistTransaction(Transaction? transaction) /// Releases the connection. If the connection is pooled, it will be returned to the pool and made available for re-use. /// If it is non-pooled, the physical connection will be closed. /// -#if NETSTANDARD2_0 - public Task CloseAsync() -#else public override Task CloseAsync() -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Close(async: true); - } + => Close(async: true); internal bool TakeCloseLock() => Interlocked.Exchange(ref _closing, 1) == 0; @@ -802,28 +721,12 @@ internal Task Close(bool async) throw new ArgumentOutOfRangeException("Unknown connection state: " + FullState); } - // TODO: The following shouldn't exist - we need to flow down the regular path to close any - // open reader / COPY. See test CloseDuringRead with multiplexing. - if (Settings.Multiplexing && ConnectorBindingScope == ConnectorBindingScope.None) - { - // TODO: Consider falling through to the regular reset logic. This adds some unneeded conditions - // and assignment but actual perf impact should be negligible (measure). 
- Debug.Assert(Connector == null); - ReleaseCloseLock(); - - FullState = ConnectionState.Closed; - LogMessages.ClosedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - - return Task.CompletedTask; - } - - return CloseAsync(async); + return CloseAsync(async); } async Task CloseAsync(bool async) { Debug.Assert(Connector != null); - Debug.Assert(ConnectorBindingScope != ConnectorBindingScope.None); try { @@ -833,36 +736,22 @@ async Task CloseAsync(bool async) if (connector.CurrentReader != null || connector.CurrentCopyOperation != null) { // This method could re-enter connection.Close() due to an underlying connection failure. - await connector.CloseOngoingOperations(async); - - if (ConnectorBindingScope == ConnectorBindingScope.None) - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(Connector is null); - - FullState = ConnectionState.Closed; - LogMessages.ClosedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - return; - } + await connector.CloseOngoingOperations(async).ConfigureAwait(false); } - Debug.Assert(connector.IsReady || connector.IsBroken); + Debug.Assert(connector.IsReady || connector.IsBroken, $"Connector is not ready or broken during close, it's {connector.State}"); Debug.Assert(connector.CurrentReader == null); Debug.Assert(connector.CurrentCopyOperation == null); if (EnlistedTransaction != null) { - // A System.Transactions transaction is still in progress - - connector.Connection = null; - - // If pooled, close the connection and disconnect it from the resource manager but leave the - // connector in an enlisted pending list in the pool. If another connection is opened within + // A System.Transactions transaction is still in progress. 
+ // Close the connection and disconnect it from the resource manager and reset the connector, but leave the + // connector in an enlisted pending list in the data source. If another connection is opened within // the same transaction scope, we will reuse this connector to avoid escalating to a distributed - // transaction - // If a *non-pooled* connection is being closed but is enlisted in an ongoing - // TransactionScope, we do nothing - simply detach the connector from the connection and leave - // it open. It will be closed when the TransactionScope is disposed. + // transaction. + connector.ResetWithinEnlistedTransaction(); + connector.Connection = null; _dataSource?.AddPendingEnlistedConnector(connector, EnlistedTransaction); EnlistedTransaction = null; @@ -872,34 +761,22 @@ async Task CloseAsync(bool async) if (Settings.Pooling) { // Clear the buffer, roll back any pending transaction and prepend a reset message if needed - // Also returns the connector to the pool, if there is an open transaction and multiplexing is on // Note that we're doing this only for pooled connections - await connector.Reset(async); + await connector.Reset(async).ConfigureAwait(false); } else { // We're already doing the same in the NpgsqlConnector.Reset for pooled connections // TODO: move reset logic to ConnectorSource.Return connector.Transaction?.UnbindIfNecessary(); - } - - if (Settings.Multiplexing) - { - // We've already closed ongoing operations rolled back any transaction and the connector is already in the pool, - // so we must be unbound. Nothing to do. 
- Debug.Assert(ConnectorBindingScope == ConnectorBindingScope.None, - $"When closing a multiplexed connection, the connection was supposed to be unbound, but {nameof(ConnectorBindingScope)} was {ConnectorBindingScope}"); - } - else - { - connector.Connection = null; - connector.Return(); } + + connector.Connection = null; + connector.Return(); } LogMessages.ClosedConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString, connector.Id); Connector = null; - ConnectorBindingScope = ConnectorBindingScope.None; FullState = ConnectionState.Closed; } finally @@ -925,24 +802,13 @@ protected override void Dispose(bool disposing) /// /// Releases all resources used by the . /// -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else - public override ValueTask DisposeAsync() -#endif + public override async ValueTask DisposeAsync() { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncCore(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - async ValueTask DisposeAsyncCore() - { - if (_disposed) - return; + if (_disposed) + return; - await CloseAsync(); - _disposed = true; - } + await CloseAsync().ConfigureAwait(false); + _disposed = true; } internal void MakeDisposed() @@ -1008,17 +874,50 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// Returns whether SSL is being used for the connection. /// - internal bool IsSecure => CheckOpenAndRunInTemporaryScope(c => c.IsSecure); + internal bool IsSslEncrypted + { + get + { + CheckOpen(); + return Connector!.IsSslEncrypted; + } + } + + /// + /// Returns whether GSS encryption is being used for the connection. 
+ /// + internal bool IsGssEncrypted + { + get + { + CheckOpen(); + return Connector!.IsGssEncrypted; + } + } /// /// Returns whether SCRAM-SHA256 is being user for the connection /// - internal bool IsScram => CheckOpenAndRunInTemporaryScope(c => c.IsScram); + internal bool IsScram + { + get + { + CheckOpen(); + return Connector!.IsScram; + } + } /// /// Returns whether SCRAM-SHA256-PLUS is being user for the connection /// - internal bool IsScramPlus => CheckOpenAndRunInTemporaryScope(c => c.IsScramPlus); + internal bool IsScramPlus + { + get + { + CheckOpen(); + return Connector!.IsScramPlus; + } + } /// /// Selects the local Secure Sockets Layer (SSL) certificate used for authentication. @@ -1026,6 +925,7 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// See /// + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public ProvideClientCertificatesCallback? ProvideClientCertificatesCallback { get; set; } /// @@ -1041,8 +941,19 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// See . /// /// + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; set; } + /// + /// When using SSL/TLS, this is a callback that allows customizing SslStream's authentication options. + /// + /// + /// + /// See . + /// + /// + public Action? SslClientAuthenticationOptionsCallback { get; set; } + #endregion SSL #region Backend version, capabilities, settings @@ -1062,7 +973,14 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// [Browsable(false)] - public Version PostgreSqlVersion => CheckOpenAndRunInTemporaryScope(c => c.DatabaseInfo.Version); + public Version PostgreSqlVersion + { + get + { + CheckOpen(); + return Connector!.DatabaseInfo.Version; + } + } /// /// The PostgreSQL server version as returned by the server_version option. 
@@ -1070,8 +988,14 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// This can only be called when the connection is open. /// /// - public override string ServerVersion => CheckOpenAndRunInTemporaryScope( - c => c.DatabaseInfo.ServerVersion); + public override string ServerVersion + { + get + { + CheckOpen(); + return Connector!.DatabaseInfo.ServerVersion; + } + } /// /// Process id of backend server. @@ -1084,10 +1008,7 @@ public int ProcessID get { CheckOpen(); - - return TryGetBoundConnector(out var connector) - ? connector.BackendProcessId - : throw new InvalidOperationException("No bound physical connection (using multiplexing)"); + return Connector!.BackendProcessId; } } @@ -1097,13 +1018,27 @@ public int ProcessID /// Meant for use by type plugins (e.g. NodaTime) /// [Browsable(false)] - public bool HasIntegerDateTimes => CheckOpenAndRunInTemporaryScope(c => c.DatabaseInfo.HasIntegerDateTimes); + public bool HasIntegerDateTimes + { + get + { + CheckOpen(); + return Connector!.DatabaseInfo.HasIntegerDateTimes; + } + } /// /// The connection's timezone as reported by PostgreSQL, in the IANA/Olson database format. /// [Browsable(false)] - public string Timezone => CheckOpenAndRunInTemporaryScope(c => c.Timezone); + public string Timezone + { + get + { + CheckOpen(); + return Connector!.Timezone; + } + } /// /// Holds all PostgreSQL parameters received for this connection. Is updated if the values change @@ -1111,7 +1046,13 @@ public int ProcessID /// [Browsable(false)] public IReadOnlyDictionary PostgresParameters - => CheckOpenAndRunInTemporaryScope(c => c.PostgresParameters); + { + get + { + CheckOpen(); + return Connector!.PostgresParameters; + } + } #endregion Backend version, capabilities, settings @@ -1126,7 +1067,7 @@ public IReadOnlyDictionary PostgresParameters /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public NpgsqlBinaryImporter BeginBinaryImport(string copyFromCommand) - => BeginBinaryImport(copyFromCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginBinaryImport(async: false, copyFromCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a binary COPY FROM STDIN operation, a high-performance data import mechanism to a PostgreSQL table. @@ -1138,35 +1079,40 @@ public NpgsqlBinaryImporter BeginBinaryImport(string copyFromCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public Task BeginBinaryImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginBinaryImport(copyFromCommand, async: true, cancellationToken); - } + => BeginBinaryImport(async: true, copyFromCommand, cancellationToken); - async Task BeginBinaryImport(string copyFromCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginBinaryImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { - if (copyFromCommand == null) - throw new ArgumentNullException(nameof(copyFromCommand)); - if (!copyFromCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + ArgumentNullException.ThrowIfNull(copyFromCommand); + if (!IsValidCopyCommand(copyFromCommand)) throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingBinaryImport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var importer = new NpgsqlBinaryImporter(connector); try { - var importer = new NpgsqlBinaryImporter(connector); - await 
importer.Init(copyFromCommand, async, cancellationToken); + await importer.Init(copyFromCommand, async, cancellationToken).ConfigureAwait(false); connector.CurrentCopyOperation = importer; return importer; } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await importer.DisposeAsync().ConfigureAwait(false); + else + importer.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1180,7 +1126,7 @@ async Task BeginBinaryImport(string copyFromCommand, bool /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public NpgsqlBinaryExporter BeginBinaryExport(string copyToCommand) - => BeginBinaryExport(copyToCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginBinaryExport(async: false, copyToCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a binary COPY TO STDOUT operation, a high-performance data export mechanism from a PostgreSQL table. @@ -1192,35 +1138,40 @@ public NpgsqlBinaryExporter BeginBinaryExport(string copyToCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public Task BeginBinaryExportAsync(string copyToCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginBinaryExport(copyToCommand, async: true, cancellationToken); - } + => BeginBinaryExport(async: true, copyToCommand, cancellationToken); - async Task BeginBinaryExport(string copyToCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginBinaryExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) { - if (copyToCommand == null) - throw new ArgumentNullException(nameof(copyToCommand)); - if (!copyToCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + ArgumentNullException.ThrowIfNull(copyToCommand); + if (!IsValidCopyCommand(copyToCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingBinaryExport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var exporter = new NpgsqlBinaryExporter(connector); try { - var exporter = new NpgsqlBinaryExporter(connector); - await exporter.Init(copyToCommand, async, cancellationToken); + await exporter.Init(copyToCommand, async, cancellationToken).ConfigureAwait(false); connector.CurrentCopyOperation = exporter; return exporter; } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await exporter.DisposeAsync().ConfigureAwait(false); + else + exporter.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1236,8 +1187,8 @@ async Task BeginBinaryExport(string copyToCommand, bool as /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// - public TextWriter BeginTextImport(string copyFromCommand) - => BeginTextImport(copyFromCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + public NpgsqlCopyTextWriter BeginTextImport(string copyFromCommand) + => BeginTextImport(async: false, copyFromCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a textual COPY FROM STDIN operation, a data import mechanism to a PostgreSQL table. @@ -1251,37 +1202,42 @@ public TextWriter BeginTextImport(string copyFromCommand) /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// - public Task BeginTextImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginTextImport(copyFromCommand, async: true, cancellationToken); - } + public Task BeginTextImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) + => BeginTextImport(async: true, copyFromCommand, cancellationToken); - async Task BeginTextImport(string copyFromCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginTextImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { - if (copyFromCommand == null) - throw new ArgumentNullException(nameof(copyFromCommand)); - if (!copyFromCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + ArgumentNullException.ThrowIfNull(copyFromCommand); + if (!IsValidCopyCommand(copyFromCommand)) throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingTextImport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var 
copyStream = new NpgsqlRawCopyStream(connector); try { - var copyStream = new NpgsqlRawCopyStream(connector); - await copyStream.Init(copyFromCommand, async, cancellationToken); + await copyStream.Init(copyFromCommand, async, forExport: false, cancellationToken).ConfigureAwait(false); var writer = new NpgsqlCopyTextWriter(connector, copyStream); connector.CurrentCopyOperation = writer; return writer; } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await copyStream.DisposeAsync().ConfigureAwait(false); + else + copyStream.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1297,8 +1253,8 @@ async Task BeginTextImport(string copyFromCommand, bool async, Cance /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// - public TextReader BeginTextExport(string copyToCommand) - => BeginTextExport(copyToCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + public NpgsqlCopyTextReader BeginTextExport(string copyToCommand) + => BeginTextExport(async: false, copyToCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a textual COPY TO STDOUT operation, a data export mechanism from a PostgreSQL table. @@ -1312,37 +1268,42 @@ public TextReader BeginTextExport(string copyToCommand) /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// - public Task BeginTextExportAsync(string copyToCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginTextExport(copyToCommand, async: true, cancellationToken); - } + public Task BeginTextExportAsync(string copyToCommand, CancellationToken cancellationToken = default) + => BeginTextExport(async: true, copyToCommand, cancellationToken); - async Task BeginTextExport(string copyToCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginTextExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) { - if (copyToCommand == null) - throw new ArgumentNullException(nameof(copyToCommand)); - if (!copyToCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + ArgumentNullException.ThrowIfNull(copyToCommand); + if (!IsValidCopyCommand(copyToCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingTextExport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var copyStream = new NpgsqlRawCopyStream(connector); try { - var copyStream = new NpgsqlRawCopyStream(connector); - await copyStream.Init(copyToCommand, async, cancellationToken); + await copyStream.Init(copyToCommand, async, forExport: true, cancellationToken).ConfigureAwait(false); var reader = new NpgsqlCopyTextReader(connector, copyStream); connector.CurrentCopyOperation = reader; return reader; } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await copyStream.DisposeAsync().ConfigureAwait(false); + else + copyStream.Dispose(); + } + catch + { + // 
ignored + } throw; } } @@ -1359,7 +1320,7 @@ async Task BeginTextExport(string copyToCommand, bool async, Cancell /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public NpgsqlRawCopyStream BeginRawBinaryCopy(string copyCommand) - => BeginRawBinaryCopy(copyCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginRawBinaryCopy(async: false, copyCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a raw binary COPY operation (TO STDOUT or FROM STDIN), a high-performance data export/import mechanism to a PostgreSQL table. @@ -1374,28 +1335,24 @@ public NpgsqlRawCopyStream BeginRawBinaryCopy(string copyCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public Task BeginRawBinaryCopyAsync(string copyCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginRawBinaryCopy(copyCommand, async: true, cancellationToken); - } + => BeginRawBinaryCopy(async: true, copyCommand, cancellationToken); - async Task BeginRawBinaryCopy(string copyCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginRawBinaryCopy(bool async, string copyCommand, CancellationToken cancellationToken = default) { - if (copyCommand == null) - throw new ArgumentNullException(nameof(copyCommand)); - if (!copyCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + ArgumentNullException.ThrowIfNull(copyCommand); + if (!IsValidCopyCommand(copyCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT OR COPY FROM STDIN command!", nameof(copyCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingRawCopy(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method 
connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var stream = new NpgsqlRawCopyStream(connector); try { - var stream = new NpgsqlRawCopyStream(connector); - await stream.Init(copyCommand, async, cancellationToken); + await stream.Init(copyCommand, async, forExport: null, cancellationToken).ConfigureAwait(false); if (!stream.IsBinary) { // TODO: Stop the COPY operation gracefully, no breaking @@ -1407,12 +1364,23 @@ async Task BeginRawBinaryCopy(string copyCommand, bool asyn } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await stream.DisposeAsync().ConfigureAwait(false); + else + stream.Dispose(); + } + catch + { + // ignored + } throw; } } + static bool IsValidCopyCommand(string copyCommand) => copyCommand.AsSpan().TrimStart().StartsWith("COPY", StringComparison.OrdinalIgnoreCase); + #endregion #region Wait @@ -1432,8 +1400,6 @@ public bool Wait(int timeout) { if (timeout != -1 && timeout < 0) throw new ArgumentException("Argument must be -1, 0 or positive", nameof(timeout)); - if (Settings.Multiplexing) - throw new NotSupportedException($"{nameof(Wait)} isn't supported in multiplexing mode"); CheckReady(); @@ -1475,14 +1441,10 @@ public bool Wait(int timeout) /// true if an asynchronous message was received, false if timed out. 
public Task WaitAsync(int timeout, CancellationToken cancellationToken = default) { - if (Settings.Multiplexing) - throw new NotSupportedException($"{nameof(Wait)} isn't supported in multiplexing mode"); - CheckReady(); LogMessages.StartingWait(_connectionLogger, timeout, Connector!.Id); - using (NoSynchronizationContextScope.Enter()) - return Connector!.Wait(async: true, timeout, cancellationToken); + return Connector!.Wait(async: true, timeout, cancellationToken); } /// @@ -1524,43 +1486,29 @@ void CheckOpen() case ConnectionState.Open | ConnectionState.Executing: case ConnectionState.Open | ConnectionState.Fetching: case ConnectionState.Connecting: - break; + return; case ConnectionState.Closed: case ConnectionState.Broken: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + return; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return; } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckClosed() { CheckDisposed(); - switch (FullState) - { - case ConnectionState.Closed: - case ConnectionState.Broken: - break; - case ConnectionState.Open: - case ConnectionState.Connecting: - case ConnectionState.Open | ConnectionState.Executing: - case ConnectionState.Open | ConnectionState.Fetching: - throw new InvalidOperationException("Connection already open"); - default: - throw new ArgumentOutOfRangeException(); - } + var fullState = FullState; + if (fullState is ConnectionState.Connecting || fullState.HasFlag(ConnectionState.Open)) + ThrowHelper.ThrowInvalidOperationException("Connection already open"); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckDisposed() - { - if (_disposed) - throw new ObjectDisposedException(typeof(NpgsqlConnection).Name); - } + => ObjectDisposedException.ThrowIf(_disposed, this); - [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void CheckReady() { 
CheckDisposed(); @@ -1569,136 +1517,28 @@ internal void CheckReady() { case ConnectionState.Open: case ConnectionState.Connecting: // We need to do type loading as part of connecting - break; + return; case ConnectionState.Closed: case ConnectionState.Broken: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + return; case ConnectionState.Open | ConnectionState.Executing: case ConnectionState.Open | ConnectionState.Fetching: - throw new InvalidOperationException("Connection is busy"); + ThrowHelper.ThrowInvalidOperationException("Connection is busy"); + return; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return; } } #endregion State checks - #region Connector binding - - /// - /// Checks whether the connection is currently bound to a connector, and if so, returns it via - /// . - /// - internal bool TryGetBoundConnector([NotNullWhen(true)] out NpgsqlConnector? connector) - { - if (ConnectorBindingScope == ConnectorBindingScope.None) - { - Debug.Assert(Connector == null, $"Binding scope is None but {Connector} exists"); - connector = null; - return false; - } - Debug.Assert(Connector != null, $"Binding scope is {ConnectorBindingScope} but {Connector} is null"); - Debug.Assert(Connector.Connection == this, $"Bound connector {Connector} does not reference this connection"); - connector = Connector; - return true; - } - - /// - /// Binds this connection to a physical connector. This happens when opening a non-multiplexing connection, - /// or when starting a transaction on a multiplexed connection. - /// - internal ValueTask StartBindingScope( - ConnectorBindingScope scope, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) - { - // If the connection is around bound at a higher scope, we do nothing (e.g. copy operation started - // within a transaction on a multiplexing connection). 
- // Note that if we're in an ambient transaction, that means we're already bound and so we do nothing here. - if (ConnectorBindingScope != ConnectorBindingScope.None) - { - Debug.Assert(Connector != null, $"Connection bound with scope {ConnectorBindingScope} but has no connector"); - Debug.Assert(scope != ConnectorBindingScope, $"Binding scopes aren't reentrant ({ConnectorBindingScope})"); - return new ValueTask(Connector); - } - - return StartBindingScopeAsync(); - - async ValueTask StartBindingScopeAsync() - { - try - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(_dataSource != null); - - var connector = await _dataSource.Get(this, timeout, async, cancellationToken); - Connector = connector; - connector.Connection = this; - ConnectorBindingScope = scope; - return connector; - } - catch - { - FullState = ConnectionState.Broken; - throw; - } - } - } - - internal NpgsqlConnector StartBindingScope(ConnectorBindingScope scope) - => StartBindingScope(scope, NpgsqlTimeout.Infinite, async: false, CancellationToken.None) - .GetAwaiter().GetResult(); - - internal EndScopeDisposable StartTemporaryBindingScope(out NpgsqlConnector connector) - { - connector = StartBindingScope(ConnectorBindingScope.Temporary); - return new EndScopeDisposable(this); - } - - internal T CheckOpenAndRunInTemporaryScope(Func func) - { - CheckOpen(); - - using var _ = StartTemporaryBindingScope(out var connector); - var result = func(connector); - return result; - } - - /// - /// Ends binding scope to the physical connection and returns it to the pool. Only useful with multiplexing on. - /// - /// - /// After this method is called, under no circumstances the physical connection (connector) should ever be used if multiplexing is on. - /// See #3249. 
- /// - internal void EndBindingScope(ConnectorBindingScope scope) - { - Debug.Assert(ConnectorBindingScope != ConnectorBindingScope.None || FullState == ConnectionState.Broken, - $"Ending binding scope {scope} but connection's scope is null"); - - if (scope != ConnectorBindingScope) - return; - - Debug.Assert(Connector != null, $"Ending binding scope {scope} but connector is null"); - Debug.Assert(_dataSource != null, $"Ending binding scope {scope} but _pool is null"); - Debug.Assert(Settings.Multiplexing, $"Ending binding scope {scope} but multiplexing is disabled"); - - // TODO: If enlisted transaction scope is still active, need to AddPendingEnlistedConnector, just like Close - var connector = Connector; - Connector = null; - connector.Connection = null; - connector.Transaction?.UnbindIfNecessary(); - connector.Return(); - ConnectorBindingScope = ConnectorBindingScope.None; - } - - #endregion Connector binding - #region Schema operations /// /// Returns the supported collections /// - [UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] public override DataTable GetSchema() => GetSchema("MetaDataCollections", null); @@ -1719,7 +1559,7 @@ public override DataTable GetSchema() /// /// The collection specified. public override DataTable GetSchema(string? collectionName, string?[]? restrictions) - => NpgsqlSchema.GetSchema(this, collectionName, restrictions, async: false).GetAwaiter().GetResult(); + => NpgsqlSchema.GetSchema(async: false, this, collectionName, restrictions).GetAwaiter().GetResult(); /// /// Asynchronously returns the supported collections. @@ -1728,11 +1568,7 @@ public override DataTable GetSchema(string? collectionName, string?[]? restricti /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. 
-#if NET5_0_OR_GREATER public override Task GetSchemaAsync(CancellationToken cancellationToken = default) -#else - public Task GetSchemaAsync(CancellationToken cancellationToken = default) -#endif => GetSchemaAsync("MetaDataCollections", null, cancellationToken); /// @@ -1743,11 +1579,7 @@ public Task GetSchemaAsync(CancellationToken cancellationToken = defa /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. -#if NET5_0_OR_GREATER public override Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) -#else - public Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) -#endif => GetSchemaAsync(collectionName, null, cancellationToken); /// @@ -1762,15 +1594,8 @@ public Task GetSchemaAsync(string collectionName, CancellationToken c /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. -#if NET5_0_OR_GREATER public override Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) -#else - public Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return NpgsqlSchema.GetSchema(this, collectionName, restrictions, async: true, cancellationToken); - } + => NpgsqlSchema.GetSchema(async: true, this, collectionName, restrictions, cancellationToken); #endregion Schema operations @@ -1783,13 +1608,20 @@ object ICloneable.Clone() { CheckDisposed(); + // For NativeAOT code size reduction, we avoid instantiating a connection here directly with + // `new NpgsqlConnection(_connectionString)`, since that would bring in the default data source builder, and with it various + // features which significantly increase binary size (ranges, System.Text.Json...). 
Instead, we pass through a "cloning + // instantiator" abstraction, where the implementation only ever gets set if SetupDataSource above is called (in which case the + // default data source is brought in anyway). + Debug.Assert(_dataSource is not null || _cloningInstantiator is not null); var conn = _dataSource is null - ? new NpgsqlConnection(_connectionString) + ? _cloningInstantiator!(_connectionString) : _dataSource.CreateConnection(); + conn.SslClientAuthenticationOptionsCallback = SslClientAuthenticationOptionsCallback; +#pragma warning disable CS0618 // Obsolete conn.ProvideClientCertificatesCallback = ProvideClientCertificatesCallback; conn.UserCertificateValidationCallback = UserCertificateValidationCallback; -#pragma warning disable CS0618 // Obsolete conn.ProvidePasswordCallback = ProvidePasswordCallback; #pragma warning restore CS0618 conn._userFacingConnectionString = _userFacingConnectionString; @@ -1813,13 +1645,35 @@ public NpgsqlConnection CloneWith(string connectionString) return new NpgsqlConnection(csb.ToString()) { - ProvideClientCertificatesCallback = - ProvideClientCertificatesCallback ?? - (_dataSource?.ClientCertificatesCallback is { } clientCertificatesCallback - ? (ProvideClientCertificatesCallback)(certs => clientCertificatesCallback(certs)) - : null), - UserCertificateValidationCallback = UserCertificateValidationCallback ?? _dataSource?.UserCertificateValidationCallback, + SslClientAuthenticationOptionsCallback = SslClientAuthenticationOptionsCallback ?? _dataSource?.SslClientAuthenticationOptionsCallback, +#pragma warning disable CS0618 // Obsolete + ProvideClientCertificatesCallback = ProvideClientCertificatesCallback, + UserCertificateValidationCallback = UserCertificateValidationCallback, + ProvidePasswordCallback = ProvidePasswordCallback, +#pragma warning restore CS0618 + }; + } + + /// + /// Clones this connection, replacing its connection string with the given one. 
+ /// This allows creating a new connection with the same security information + /// (password, SSL callbacks) while changing other connection parameters (e.g. + /// database or pooling) + /// + public async ValueTask CloneWithAsync(string connectionString, CancellationToken cancellationToken = default) + { + CheckDisposed(); + var csb = new NpgsqlConnectionStringBuilder(connectionString); + csb.Password ??= _dataSource is null ? null : await _dataSource.GetPassword(async: true, cancellationToken).ConfigureAwait(false); + if (csb.PersistSecurityInfo && !Settings.PersistSecurityInfo) + csb.PersistSecurityInfo = false; + + return new NpgsqlConnection(csb.ToString()) + { + SslClientAuthenticationOptionsCallback = SslClientAuthenticationOptionsCallback ?? _dataSource?.SslClientAuthenticationOptionsCallback, #pragma warning disable CS0618 // Obsolete + ProvideClientCertificatesCallback = ProvideClientCertificatesCallback, + UserCertificateValidationCallback = UserCertificateValidationCallback, ProvidePasswordCallback = ProvidePasswordCallback, #pragma warning restore CS0618 }; @@ -1832,8 +1686,7 @@ public NpgsqlConnection CloneWith(string connectionString) /// The name of the database to use in place of the current database. 
public override void ChangeDatabase(string dbName) { - if (dbName == null) - throw new ArgumentNullException(nameof(dbName)); + ArgumentNullException.ThrowIfNull(dbName); if (string.IsNullOrEmpty(dbName)) throw new ArgumentOutOfRangeException(nameof(dbName), dbName, $"Invalid database name: {dbName}"); @@ -1872,9 +1725,6 @@ public override void ChangeDatabase(string dbName) /// public void UnprepareAll() { - if (Settings.Multiplexing) - throw new NotSupportedException("Explicit preparation not supported with multiplexing"); - CheckReady(); using (Connector!.StartUserAction()) @@ -1889,10 +1739,8 @@ public void ReloadTypes() { CheckReady(); - using var scope = StartTemporaryBindingScope(out var connector); - _dataSource!.Bootstrap( - connector, + Connector!, NpgsqlTimeout.Infinite, forceReload: true, async: false, @@ -1904,18 +1752,23 @@ public void ReloadTypes() /// Flushes the type cache for this connection's connection string and reloads the types for this connection only. /// Type changes will appear for other connections only after they are re-opened from the pool. /// - public async Task ReloadTypesAsync() + public Task ReloadTypesAsync() + => ReloadTypesAsync(CancellationToken.None); + + /// + /// Flushes the type cache for this connection's connection string and reloads the types for this connection only. + /// Type changes will appear for other connections only after they are re-opened from the pool. + /// + public async Task ReloadTypesAsync(CancellationToken cancellationToken) { CheckReady(); - using var scope = StartTemporaryBindingScope(out var connector); - await _dataSource!.Bootstrap( - connector, - NpgsqlTimeout.Infinite, - forceReload: true, - async: true, - CancellationToken.None); + Connector!, + NpgsqlTimeout.Infinite, + forceReload: true, + async: true, + cancellationToken).ConfigureAwait(false); } /// @@ -1937,48 +1790,6 @@ event EventHandler? 
IComponent.Disposed #endregion Misc } -enum ConnectorBindingScope -{ - /// - /// The connection is currently not bound to a connector. - /// - None, - - /// - /// The connection is bound to its connector for the scope of the entire connection - /// (i.e. non-multiplexed connection). - /// - Connection, - - /// - /// The connection is bound to its connector for the scope of a transaction. - /// - Transaction, - - /// - /// The connection is bound to its connector for the scope of a COPY operation. - /// - Copy, - - /// - /// The connection is bound to its connector for the scope of a single reader. - /// - Reader, - - /// - /// The connection is bound to its connector for an unspecified, temporary scope; the code that initiated - /// the binding is also responsible to unbind it. - /// - Temporary -} - -readonly struct EndScopeDisposable : IDisposable -{ - readonly NpgsqlConnection _connection; - public EndScopeDisposable(NpgsqlConnection connection) => _connection = connection; - public void Dispose() => _connection.EndBindingScope(ConnectorBindingScope.Temporary); -} - #region Delegates /// diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 26589cef52..9b79c9f064 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -6,10 +6,7 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq; using Npgsql.Internal; -using Npgsql.Netstandard20; -using Npgsql.Properties; using Npgsql.Replication; namespace Npgsql; @@ -18,6 +15,10 @@ namespace Npgsql; /// Provides a simple way to create and manage the contents of connection strings used by /// the class. /// +[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", + Justification = "Suppressing the same warnings as suppressed in the base DbConnectionStringBuilder. 
See https://github.com/dotnet/runtime/issues/97057")] +[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2113:ReflectionToRequiresUnreferencedCode", + Justification = "Suppressing the same warnings as suppressed in the base DbConnectionStringBuilder. See https://github.com/dotnet/runtime/issues/97057")] public sealed partial class NpgsqlConnectionStringBuilder : DbConnectionStringBuilder, IDictionary { #region Fields @@ -27,9 +28,9 @@ public sealed partial class NpgsqlConnectionStringBuilder : DbConnectionStringBu /// string? _dataSourceCached; - internal string DataSourceCached - => _dataSourceCached ??= _host is null - ? string.Empty + internal string? DataSourceCached + => _dataSourceCached ??= _host is null || _host.Contains(",") + ? null : IsUnixSocket(_host, _port, out var socketPath, replaceForAbstract: false) ? socketPath : $"tcp://{_host}:{_port}"; @@ -65,6 +66,19 @@ public NpgsqlConnectionStringBuilder(string? connectionString) // Method fake-returns an int only to make sure it's code-generated private partial int Init(); + /// + /// GeneratedAction and GeneratedActions exist to be able to produce a streamlined binary footprint for NativeAOT. + /// An idiomatic approach where each action has its own method would double the binary size of NpgsqlConnectionStringBuilder. + /// + enum GeneratedAction + { + Set, + Get, + Remove, + GetCanonical + } + private partial bool GeneratedActions(GeneratedAction action, string keyword, ref object? value); + #endregion #region Non-static property handling @@ -93,7 +107,8 @@ public override object this[string keyword] try { - GeneratedSetter(keyword.ToUpperInvariant(), value); + var val = value; + GeneratedActions(GeneratedAction.Set, keyword.ToUpperInvariant(), ref val); } catch (Exception e) { @@ -102,9 +117,6 @@ public override object this[string keyword] } } - // Method fake-returns an int only to make sure it's code-generated - private partial int GeneratedSetter(string keyword, object? value); - object? 
IDictionary.this[string keyword] { get => this[keyword]; @@ -127,9 +139,10 @@ public void Add(KeyValuePair item) /// The key of the key/value pair to be removed from the connection string in this DbConnectionStringBuilder. /// true if the key existed within the connection string and was removed; false if the key did not exist. public override bool Remove(string keyword) - => RemoveGenerated(keyword.ToUpperInvariant()); - - private partial bool RemoveGenerated(string keyword); + { + object? value = null; + return GeneratedActions(GeneratedAction.Remove, keyword.ToUpperInvariant(), ref value); + } /// /// Removes the entry from the DbConnectionStringBuilder instance. @@ -145,7 +158,7 @@ public bool Remove(KeyValuePair item) public override void Clear() { Debug.Assert(Keys != null); - foreach (var k in Keys.ToArray()) + foreach (var k in (string[])Keys) Remove(k); } @@ -155,11 +168,10 @@ public override void Clear() /// The key to locate in the . /// true if the contains an entry with the specified key; otherwise false. public override bool ContainsKey(string keyword) - => keyword is null - ? throw new ArgumentNullException(nameof(keyword)) - : ContainsKeyGenerated(keyword.ToUpperInvariant()); - - private partial bool ContainsKeyGenerated(string keyword); + { + object? value = null; + return GeneratedActions(GeneratedAction.GetCanonical, (keyword ?? throw new ArgumentNullException(nameof(keyword))).ToUpperInvariant(), ref value); + } /// /// Determines whether the contains a specific key-value pair. @@ -178,25 +190,24 @@ public bool Contains(KeyValuePair item) /// true if keyword was found within the connection string, false otherwise. public override bool TryGetValue(string keyword, [NotNullWhen(true)] out object? value) { - if (keyword == null) - throw new ArgumentNullException(nameof(keyword)); - - return TryGetValueGenerated(keyword.ToUpperInvariant(), out value); + object? v = null; + var result = GeneratedActions(GeneratedAction.Get, (keyword ?? 
throw new ArgumentNullException(nameof(keyword))).ToUpperInvariant(), ref v); + value = v; + return result; } - private partial bool TryGetValueGenerated(string keyword, [NotNullWhen(true)] out object? value); - void SetValue(string propertyName, object? value) { - var canonicalKeyword = ToCanonicalKeyword(propertyName.ToUpperInvariant()); + object? canonicalKeyword = null; + var result = GeneratedActions(GeneratedAction.GetCanonical, (propertyName ?? throw new ArgumentNullException(nameof(propertyName))).ToUpperInvariant(), ref canonicalKeyword); + if (!result) + throw new KeyNotFoundException(); if (value == null) - base.Remove(canonicalKeyword); + base.Remove((string)canonicalKeyword!); else - base[canonicalKeyword] = value; + base[(string)canonicalKeyword!] = value; } - private partial string ToCanonicalKeyword(string keyword); - #endregion #region Properties - Connection @@ -233,8 +244,7 @@ public int Port get => _port; set { - if (value <= 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "Invalid port: " + value); + ArgumentOutOfRangeException.ThrowIfNegativeOrZero(value); _port = value; SetValue(nameof(Port), value); @@ -262,10 +272,10 @@ public string? Database string? _database; /// - /// The username to connect with. Not required if using IntegratedSecurity. + /// The username to connect with. /// [Category("Connection")] - [Description("The username to connect with. Not required if using IntegratedSecurity.")] + [Description("The username to connect with.")] [DisplayName("Username")] [NpgsqlConnectionStringProperty("User Name", "UserId", "User Id", "UID")] public string? Username @@ -280,10 +290,10 @@ public string? Username string? _username; /// - /// The password to connect with. Not required if using IntegratedSecurity. + /// The password to connect with. /// [Category("Connection")] - [Description("The password to connect with. 
Not required if using IntegratedSecurity.")] + [Description("The password to connect with.")] [PasswordPropertyText(true)] [DisplayName("Password")] [NpgsqlConnectionStringProperty("PSW", "PWD")] @@ -451,22 +461,42 @@ public SslMode SslMode SslMode _sslMode; /// - /// Whether to trust the server certificate without validating it. + /// Controls how SSL encryption is negotiated with the server, if SSL is used. /// [Category("Security")] - [Description("Whether to trust the server certificate without validating it.")] - [DisplayName("Trust Server Certificate")] + [Description("Controls how SSL encryption is negotiated with the server, if SSL is used.")] + [DisplayName("SSL Negotiation")] [NpgsqlConnectionStringProperty] - public bool TrustServerCertificate + public SslNegotiation SslNegotiation { - get => _trustServerCertificate; + get => UserProvidedSslNegotiation ?? SslNegotiation.Postgres; set { - _trustServerCertificate = value; - SetValue(nameof(TrustServerCertificate), value); + UserProvidedSslNegotiation = value; + SetValue(nameof(SslNegotiation), value); } } - bool _trustServerCertificate; + + internal SslNegotiation? UserProvidedSslNegotiation { get; private set; } + + /// + /// Controls whether GSS encryption is required, disabled or preferred, depending on server support. + /// + [Category("Security")] + [Description("Controls whether GSS encryption is required, disabled or preferred, depending on server support.")] + [DisplayName("GSS Encryption Mode")] + [NpgsqlConnectionStringProperty] + public GssEncryptionMode GssEncryptionMode + { + get => UserProvidedGssEncMode ?? GssEncryptionMode.Prefer; + set + { + UserProvidedGssEncMode = value; + SetValue(nameof(GssEncryptionMode), value); + } + } + + internal GssEncryptionMode? UserProvidedGssEncMode { get; private set; } /// /// Location of a client certificate to be sent to the server. 
@@ -559,28 +589,6 @@ public bool CheckCertificateRevocation } bool _checkCertificateRevocation; - /// - /// Whether to use Windows integrated security to log in. - /// - [Category("Security")] - [Description("Whether to use Windows integrated security to log in.")] - [DisplayName("Integrated Security")] - [NpgsqlConnectionStringProperty] - public bool IntegratedSecurity - { - get => _integratedSecurity; - set - { - // No integrated security if we're on mono and .NET 4.5 because of ClaimsIdentity, - // see https://github.com/npgsql/Npgsql/issues/133 - if (value && Type.GetType("Mono.Runtime") != null) - throw new NotSupportedException("IntegratedSecurity is currently unsupported on mono and .NET 4.5 (see https://github.com/npgsql/Npgsql/issues/133)"); - _integratedSecurity = value; - SetValue(nameof(IntegratedSecurity), value); - } - } - bool _integratedSecurity; - /// /// The Kerberos service name to be used for authentication. /// @@ -606,6 +614,7 @@ public string KerberosServiceName [Category("Security")] [Description("The Kerberos realm to be used for authentication.")] [DisplayName("Include Realm")] + [DefaultValue(true)] [NpgsqlConnectionStringProperty] public bool IncludeRealm { @@ -675,6 +684,107 @@ public bool IncludeErrorDetail } bool _includeErrorDetail; + /// + /// When enabled, failed statements are included on . + /// + [Category("Security")] + [Description("When enabled, failed batched commands are included on NpgsqlException.BatchCommand.")] + [DisplayName("Include Failed Batched Command")] + [NpgsqlConnectionStringProperty] + public bool IncludeFailedBatchedCommand + { + get => _includeFailedBatchedCommand; + set + { + _includeFailedBatchedCommand = value; + SetValue(nameof(IncludeFailedBatchedCommand), value); + } + } + bool _includeFailedBatchedCommand; + + /// + /// Controls whether channel binding is required, disabled or preferred, depending on server support. 
+ /// + [Category("Security")] + [Description("Controls whether channel binding is required, disabled or preferred, depending on server support.")] + [DisplayName("Channel Binding")] + [DefaultValue(ChannelBinding.Prefer)] + [NpgsqlConnectionStringProperty] + public ChannelBinding ChannelBinding + { + get => _channelBinding; + set + { + _channelBinding = value; + SetValue(nameof(ChannelBinding), value); + } + } + ChannelBinding _channelBinding; + + /// + /// Controls the available authentication methods. + /// + [Category("Security")] + [Description("Controls the available authentication methods.")] + [DisplayName("Require Auth")] + [NpgsqlConnectionStringProperty] + public string? RequireAuth + { + get => _requireAuth; + set + { + RequireAuthModes = ParseAuthMode(value); + _requireAuth = value; + SetValue(nameof(RequireAuth), value); + } + } + string? _requireAuth; + + internal RequireAuthMode RequireAuthModes { get; private set; } + + internal static RequireAuthMode ParseAuthMode(string? 
value) + { + var modes = value?.Split(',', StringSplitOptions.TrimEntries | StringSplitOptions.RemoveEmptyEntries); + if (modes is not { Length: > 0 }) + return RequireAuthMode.All; + + var isNegative = false; + RequireAuthMode parsedModes = default; + for (var i = 0; i < modes.Length; i++) + { + var mode = modes[i]; + var modeToParse = mode.AsSpan(); + if (mode.StartsWith('!')) + { + if (i > 0 && !isNegative) + throw new ArgumentException("Mixing both positive and negative authentication methods is not supported"); + + modeToParse = modeToParse.Slice(1); + isNegative = true; + } + else + { + if (i > 0 && isNegative) + throw new ArgumentException("Mixing both positive and negative authentication methods is not supported"); + } + + // Explicitly disallow 'All' as libpq doesn't have it + if (!Enum.TryParse(modeToParse, out var parsedMode) || parsedMode == RequireAuthMode.All) + throw new ArgumentException($"Unable to parse authentication method \"{modeToParse}\""); + + parsedModes |= parsedMode; + } + + var allowedModes = isNegative + ? (RequireAuthMode)(RequireAuthMode.All - parsedModes) + : parsedModes; + + if (allowedModes == default) + throw new ArgumentException($"No authentication method is allowed. 
Check \"{nameof(RequireAuth)}\" in connection string."); + + return allowedModes; + } + #endregion #region Properties - Pooling @@ -711,8 +821,7 @@ public int MinPoolSize get => _minPoolSize; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "MinPoolSize can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _minPoolSize = value; SetValue(nameof(MinPoolSize), value); @@ -733,8 +842,7 @@ public int MaxPoolSize get => _maxPoolSize; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "MaxPoolSize can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _maxPoolSize = value; SetValue(nameof(MaxPoolSize), value); @@ -787,13 +895,17 @@ public int ConnectionPruningInterval /// /// The total maximum lifetime of connections (in seconds). Connections which have exceeded this value will be /// destroyed instead of returned from the pool. This is useful in clustered configurations to force load - /// balancing between a running server and a server just brought online. + /// balancing between a running server and a server just brought online. It can also be useful to prevent + /// runaway memory growth of connections at the PostgreSQL server side, because in some cases very long lived + /// connections slowly consume more and more memory over time. + /// Defaults to 3600 seconds (1 hour). /// - /// The time (in seconds) to wait, or 0 to to make connections last indefinitely (the default). + /// The time (in seconds) to wait, or 0 to to make connections last indefinitely. 
[Category("Pooling")] [Description("The total maximum lifetime of connections (in seconds).")] [DisplayName("Connection Lifetime")] [NpgsqlConnectionStringProperty("Load Balance Timeout")] + [DefaultValue(3600)] public int ConnectionLifetime { get => _connectionLifetime; @@ -823,8 +935,8 @@ public int Timeout get => _timeout; set { - if (value < 0 || value > NpgsqlConnection.TimeoutLimit) - throw new ArgumentOutOfRangeException(nameof(value), value, "Timeout must be between 0 and " + NpgsqlConnection.TimeoutLimit); + ArgumentOutOfRangeException.ThrowIfNegative(value); + ArgumentOutOfRangeException.ThrowIfGreaterThan(value, NpgsqlConnection.TimeoutLimit); _timeout = value; SetValue(nameof(Timeout), value); @@ -848,8 +960,7 @@ public int CommandTimeout get => _commandTimeout; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "CommandTimeout can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _commandTimeout = value; SetValue(nameof(CommandTimeout), value); @@ -857,29 +968,6 @@ public int CommandTimeout } int _commandTimeout; - /// - /// The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. - /// - [Category("Timeouts")] - [Description("The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. 
-1 uses CommandTimeout, 0 means no timeout.")] - [DisplayName("Internal Command Timeout")] - [NpgsqlConnectionStringProperty] - [DefaultValue(-1)] - public int InternalCommandTimeout - { - get => _internalCommandTimeout; - set - { - if (value != 0 && value != -1 && value < NpgsqlConnector.MinimumInternalCommandTimeout) - throw new ArgumentOutOfRangeException(nameof(value), value, - $"InternalCommandTimeout must be >= {NpgsqlConnector.MinimumInternalCommandTimeout}, 0 (infinite) or -1 (use CommandTimeout)"); - - _internalCommandTimeout = value; - SetValue(nameof(InternalCommandTimeout), value); - } - } - int _internalCommandTimeout; - /// /// The time to wait (in milliseconds) while trying to read a response for a cancellation request for a timed out or cancelled query, before terminating the attempt and generating an error. /// Zero for infinity, -1 to skip the wait. @@ -895,8 +983,7 @@ public int CancellationTimeout get => _cancellationTimeout; set { - if (value < -1) - throw new ArgumentOutOfRangeException(nameof(value), value, $"{nameof(CancellationTimeout)} can't less than -1"); + ArgumentOutOfRangeException.ThrowIfLessThan(value, -1); _cancellationTimeout = value; SetValue(nameof(CancellationTimeout), value); @@ -933,7 +1020,7 @@ public string? TargetSessionAttributes set { - TargetSessionAttributesParsed = value is null ? null : ParseTargetSessionAttributes(value); + TargetSessionAttributesParsed = value is null ? 
null : ParseTargetSessionAttributes(value.ToLowerInvariant()); SetValue(nameof(TargetSessionAttributes), value); } } @@ -985,8 +1072,7 @@ public int HostRecheckSeconds get => _hostRecheckSeconds; set { - if (value < 0) - throw new ArgumentException($"{HostRecheckSeconds} cannot be negative", nameof(HostRecheckSeconds)); + ArgumentOutOfRangeException.ThrowIfNegative(value); _hostRecheckSeconds = value; SetValue(nameof(HostRecheckSeconds), value); } @@ -995,52 +1081,6 @@ public int HostRecheckSeconds #endregion Properties - Failover and load balancing - #region Properties - Entity Framework - - /// - /// The database template to specify when creating a database in Entity Framework. If not specified, - /// PostgreSQL defaults to "template1". - /// - /// - /// https://www.postgresql.org/docs/current/static/manage-ag-templatedbs.html - /// - [Category("Entity Framework")] - [Description("The database template to specify when creating a database in Entity Framework. If not specified, PostgreSQL defaults to \"template1\".")] - [DisplayName("EF Template Database")] - [NpgsqlConnectionStringProperty] - public string? EntityTemplateDatabase - { - get => _entityTemplateDatabase; - set - { - _entityTemplateDatabase = value; - SetValue(nameof(EntityTemplateDatabase), value); - } - } - string? _entityTemplateDatabase; - - /// - /// The database admin to specify when creating and dropping a database in Entity Framework. This is needed because - /// Npgsql needs to connect to a database in order to send the create/drop database command. - /// If not specified, defaults to "template1". Check NpgsqlServices.UsingPostgresDBConnection for more information. - /// - [Category("Entity Framework")] - [Description("The database admin to specify when creating and dropping a database in Entity Framework. If not specified, defaults to \"template1\".")] - [DisplayName("EF Admin Database")] - [NpgsqlConnectionStringProperty] - public string? 
EntityAdminDatabase - { - get => _entityAdminDatabase; - set - { - _entityAdminDatabase = value; - SetValue(nameof(EntityAdminDatabase), value); - } - } - string? _entityAdminDatabase; - - #endregion - #region Properties - Advanced /// @@ -1056,8 +1096,7 @@ public int KeepAlive get => _keepAlive; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "KeepAlive can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _keepAlive = value; SetValue(nameof(KeepAlive), value); @@ -1097,8 +1136,7 @@ public int TcpKeepAliveTime get => _tcpKeepAliveTime; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "TcpKeepAliveTime can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _tcpKeepAliveTime = value; SetValue(nameof(TcpKeepAliveTime), value); @@ -1119,8 +1157,7 @@ public int TcpKeepAliveInterval get => _tcpKeepAliveInterval; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "TcpKeepAliveInterval can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _tcpKeepAliveInterval = value; SetValue(nameof(TcpKeepAliveInterval), value); @@ -1216,8 +1253,7 @@ public int MaxAutoPrepare get => _maxAutoPrepare; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, $"{nameof(MaxAutoPrepare)} cannot be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _maxAutoPrepare = value; SetValue(nameof(MaxAutoPrepare), value); @@ -1239,8 +1275,7 @@ public int AutoPrepareMinUsages get => _autoPrepareMinUsages; set { - if (value < 1) - throw new ArgumentOutOfRangeException(nameof(value), value, $"{nameof(AutoPrepareMinUsages)} must be 1 or greater"); + ArgumentOutOfRangeException.ThrowIfNegativeOrZero(value); _autoPrepareMinUsages = value; SetValue(nameof(AutoPrepareMinUsages), value); @@ -1267,24 +1302,6 @@ public bool NoResetOnClose } bool _noResetOnClose; - /// - /// Load table composite type 
definitions, and not just free-standing composite types. - /// - [Category("Advanced")] - [Description("Load table composite type definitions, and not just free-standing composite types.")] - [DisplayName("Load Table Composites")] - [NpgsqlConnectionStringProperty] - public bool LoadTableComposites - { - get => _loadTableComposites; - set - { - _loadTableComposites = value; - SetValue(nameof(LoadTableComposites), value); - } - } - bool _loadTableComposites; - /// /// Set the replication mode of the connection /// @@ -1348,51 +1365,26 @@ public ArrayNullabilityMode ArrayNullabilityMode #endregion - #region Multiplexing - - /// - /// Enables multiplexing, which allows more efficient use of connections. - /// - [Category("Multiplexing")] - [Description("Enables multiplexing, which allows more efficient use of connections.")] - [DisplayName("Multiplexing")] - [NpgsqlConnectionStringProperty] - [DefaultValue(false)] - public bool Multiplexing - { - get => _multiplexing; - set - { - _multiplexing = value; - SetValue(nameof(Multiplexing), value); - } - } - bool _multiplexing; + #region Properties - Obsolete /// - /// When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before - /// flushing to the network. + /// Load table composite type definitions, and not just free-standing composite types. /// - [Category("Multiplexing")] - [Description("When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before " + - "flushing to the network.")] - [DisplayName("Write Coalescing Buffer Threshold Bytes")] + [Category("Advanced")] + [Description("Load table composite type definitions, and not just free-standing composite types.")] + [DisplayName("Load Table Composites")] [NpgsqlConnectionStringProperty] - [DefaultValue(1000)] - public int WriteCoalescingBufferThresholdBytes + [Obsolete("Specifying type loading options through the connection string is obsolete, use the DataSource builder instead. 
See the 9.0 release notes for more information.")] + public bool LoadTableComposites { - get => _writeCoalescingBufferThresholdBytes; + get => _loadTableComposites; set { - _writeCoalescingBufferThresholdBytes = value; - SetValue(nameof(WriteCoalescingBufferThresholdBytes), value); + _loadTableComposites = value; + SetValue(nameof(LoadTableComposites), value); } } - int _writeCoalescingBufferThresholdBytes; - - #endregion - - #region Properties - Compatibility + bool _loadTableComposites; /// /// A compatibility mode for special PostgreSQL server types. @@ -1401,9 +1393,11 @@ public int WriteCoalescingBufferThresholdBytes [Description("A compatibility mode for special PostgreSQL server types.")] [DisplayName("Server Compatibility Mode")] [NpgsqlConnectionStringProperty] + [Obsolete("Specifying type loading options through the connection string is obsolete, use the DataSource builder instead. See the 9.0 release notes for more information.")] public ServerCompatibilityMode ServerCompatibilityMode { - get => _serverCompatibilityMode; + // Physical replication connections don't allow regular queries, so we can't load types from PG + get => ReplicationMode is ReplicationMode.Physical ? 
ServerCompatibilityMode.NoTypeLoading : _serverCompatibilityMode; set { _serverCompatibilityMode = value; @@ -1412,150 +1406,48 @@ public ServerCompatibilityMode ServerCompatibilityMode } ServerCompatibilityMode _serverCompatibilityMode; - #endregion - - #region Properties - Obsolete - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/6.0.html - /// - [Category("Compatibility")] - [Description("Makes MaxValue and MinValue timestamps and dates readable as infinity and negative infinity.")] - [DisplayName("Convert Infinity DateTime")] - [NpgsqlConnectionStringProperty] - [Obsolete("The ConvertInfinityDateTime parameter is no longer supported.")] - public bool ConvertInfinityDateTime - { - get => false; - set => throw new NotSupportedException("The Convert Infinity DateTime parameter is no longer supported; Npgsql 6.0 and above convert min/max values to Infinity by default. See https://www.npgsql.org/doc/types/datetime.html for more details."); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html")] - [DisplayName("Continuous Processing")] - [NpgsqlConnectionStringProperty] - [Obsolete("The ContinuousProcessing parameter is no longer supported.")] - public bool ContinuousProcessing - { - get => false; - set => throw new NotSupportedException("The ContinuousProcessing parameter is no longer supported. 
Please see https://www.npgsql.org/doc/release-notes/3.1.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html")] - [DisplayName("Backend Timeouts")] - [NpgsqlConnectionStringProperty] - [Obsolete("The BackendTimeouts parameter is no longer supported")] - public bool BackendTimeouts - { - get => false; - set => throw new NotSupportedException("The BackendTimeouts parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/3.1.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.0.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/v/3.0.html")] - [DisplayName("Preload Reader")] - [NpgsqlConnectionStringProperty] - [Obsolete("The PreloadReader parameter is no longer supported")] - public bool PreloadReader - { - get => false; - set => throw new NotSupportedException("The PreloadReader parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/3.0.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.0.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/3.0.html")] - [DisplayName("Use Extended Types")] - [NpgsqlConnectionStringProperty] - [Obsolete("The UseExtendedTypes parameter is no longer supported")] - public bool UseExtendedTypes - { - get => false; - set => throw new NotSupportedException("The UseExtendedTypes parameter is no longer supported. 
Please see https://www.npgsql.org/doc/release-notes/3.0.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/4.1.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/4.1.html")] - [DisplayName("Use Ssl Stream")] - [NpgsqlConnectionStringProperty] - [Obsolete("The UseSslStream parameter is no longer supported (always true)")] - public bool UseSslStream - { - get => true; - set => throw new NotSupportedException("The UseSslStream parameter is no longer supported (SslStream is always used). Please see https://www.npgsql.org/doc/release-notes/4.1.html"); - } - - /// - /// Writes connection performance information to performance counters. - /// - [Category("Obsolete")] - [Description("Writes connection performance information to performance counters.")] - [DisplayName("Use Perf Counters")] - [NpgsqlConnectionStringProperty] - [Obsolete("The UsePerfCounters parameter is no longer supported")] - public bool UsePerfCounters - { - get => false; - set => throw new NotSupportedException("The UsePerfCounters parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/5.0.html"); - } - /// - /// Location of a client certificate to be sent to the server. + /// Whether to trust the server certificate without validating it. /// - [Category("Obsolete")] - [Description("Location of a client certificate to be sent to the server.")] - [DisplayName("Client Certificate")] + [Category("Security")] + [Description("Whether to trust the server certificate without validating it.")] + [DisplayName("Trust Server Certificate")] + [Obsolete("The TrustServerCertificate parameter is no longer needed and does nothing.")] [NpgsqlConnectionStringProperty] - [Obsolete("Use NpgsqlConnectionStringBuilder.SslKey instead")] - public string? 
ClientCertificate + public bool TrustServerCertificate { - get => SslKey; - set => SslKey = value; + get => _trustServerCertificate; + set + { + _trustServerCertificate = value; + SetValue(nameof(TrustServerCertificate), value); + } } + bool _trustServerCertificate; /// - /// Key for a client certificate to be sent to the server. + /// The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. /// [Category("Obsolete")] - [Description("Key for a client certificate to be sent to the server.")] - [DisplayName("Client Certificate Key")] + [Description("The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. -1 uses CommandTimeout, 0 means no timeout.")] + [DisplayName("Internal Command Timeout")] [NpgsqlConnectionStringProperty] - [Obsolete("Use NpgsqlConnectionStringBuilder.SslPassword instead")] - public string? ClientCertificateKey + [DefaultValue(-1)] + [Obsolete("The InternalCommandTimeout parameter is no longer needed and does nothing.")] + public int InternalCommandTimeout { - get => SslPassword; - set => SslPassword = value; - } + get => _internalCommandTimeout; + set + { + if (value != 0 && value != -1 && value < NpgsqlConnector.MinimumInternalCommandTimeout) + throw new ArgumentOutOfRangeException(nameof(value), value, + $"InternalCommandTimeout must be >= {NpgsqlConnector.MinimumInternalCommandTimeout}, 0 (infinite) or -1 (use CommandTimeout)"); - /// - /// When enabled, PostgreSQL error details are included on and - /// . These can contain sensitive data. - /// - [Category("Obsolete")] - [Description("When enabled, PostgreSQL error and notice details are included on PostgresException.Detail and PostgresNotice.Detail. 
These can contain sensitive data.")] - [DisplayName("Include Error Details")] - [NpgsqlConnectionStringProperty] - [Obsolete("Use NpgsqlConnectionStringBuilder.IncludeErrorDetail instead")] - public bool IncludeErrorDetails - { - get => IncludeErrorDetail; - set => IncludeErrorDetail = value; + _internalCommandTimeout = value; + SetValue(nameof(InternalCommandTimeout), value); + } } + int _internalCommandTimeout; #endregion @@ -1563,12 +1455,9 @@ public bool IncludeErrorDetails internal void PostProcessAndValidate() { - if (string.IsNullOrWhiteSpace(Host)) - throw new ArgumentException("Host can't be null"); - if (Multiplexing && !Pooling) - throw new ArgumentException("Pooling must be on to use multiplexing"); - if (TrustServerCertificate && SslMode is SslMode.Allow or SslMode.VerifyCA or SslMode.VerifyFull) - throw new ArgumentException(NpgsqlStrings.CannotUseTrustServerCertificate); + ArgumentException.ThrowIfNullOrWhiteSpace(Host); + if (SslNegotiation == SslNegotiation.Direct && SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) + throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); if (!Host.Contains(',')) { @@ -1616,7 +1505,7 @@ internal static bool TrySplitHostPort(ReadOnlySpan originalHost, [NotNullW var ipv6End = originalHost.LastIndexOf(']'); if (otherColon == -1 || portSeparator > ipv6End && otherColon < ipv6End) { - port = originalHost.Slice(portSeparator + 1).ParseInt(); + port = int.Parse(originalHost.Slice(portSeparator + 1)); host = originalHost.Slice(0, portSeparator).ToString(); return true; } @@ -1670,12 +1559,32 @@ public override bool Equals(object? obj) /// /// Gets an containing the keys of the . 
/// - public new ICollection Keys => base.Keys.Cast().ToArray()!; + public new ICollection Keys + { + get + { + var result = new string[base.Keys.Count]; + var i = 0; + foreach (var key in base.Keys) + result[i++] = (string)key; + return result; + } + } /// /// Gets an containing the values in the . /// - public new ICollection Values => base.Values.Cast().ToArray(); + public new ICollection Values + { + get + { + var result = new object?[base.Keys.Count]; + var i = 0; + foreach (var key in base.Values) + result[i++] = (object?)key; + return result; + } + } /// /// Copies the elements of the to an Array, starting at a particular Array index. @@ -1708,19 +1617,35 @@ public void CopyTo(KeyValuePair[] array, int arrayIndex) #region ICustomTypeDescriptor /// + [RequiresUnreferencedCode("PropertyDescriptor's PropertyType cannot be statically discovered.")] protected override void GetProperties(Hashtable propertyDescriptors) { // Tweak which properties are exposed via TypeDescriptor. This affects the VS DDEX // provider, for example. base.GetProperties(propertyDescriptors); - var toRemove = propertyDescriptors.Values - .Cast() - .Where(d => - !d.Attributes.Cast().Any(a => a is NpgsqlConnectionStringPropertyAttribute) || - d.Attributes.Cast().Any(a => a is ObsoleteAttribute) - ) - .ToList(); + var toRemove = new List(); + foreach (var value in propertyDescriptors.Values) + { + var d = (PropertyDescriptor)value; + var isConnectionStringProperty = false; + var isObsolete = false; + foreach (var attribute in d.Attributes) + { + if (attribute is NpgsqlConnectionStringPropertyAttribute) + { + isConnectionStringProperty = true; + } + else if (attribute is ObsoleteAttribute) + { + isObsolete = true; + } + } + + if (!isConnectionStringProperty || isObsolete) + toRemove.Add(d); + } + foreach (var o in toRemove) propertyDescriptors.Remove(o.DisplayName); } @@ -1746,7 +1671,7 @@ sealed class NpgsqlConnectionStringPropertyAttribute : Attribute /// Creates a . 
/// public NpgsqlConnectionStringPropertyAttribute() - => Synonyms = Array.Empty(); + => Synonyms = []; /// /// Creates a . @@ -1759,26 +1684,6 @@ public NpgsqlConnectionStringPropertyAttribute(params string[] synonyms) #region Enums -/// -/// An option specified in the connection string that activates special compatibility features. -/// -public enum ServerCompatibilityMode -{ - /// - /// No special server compatibility mode is active - /// - None, - /// - /// The server is an Amazon Redshift instance. - /// - Redshift, - /// - /// The server is doesn't support full type loading from the PostgreSQL catalogs, support the basic set - /// of types via information hardcoded inside Npgsql. - /// - NoTypeLoading, -} - /// /// Specifies how to manage SSL. /// @@ -1810,6 +1715,59 @@ public enum SslMode VerifyFull } +/// +/// Specifies how to initialize SSL session. +/// +public enum SslNegotiation +{ + /// + /// Perform PostgreSQL protocol negotiation. + /// + Postgres, + /// + /// Start SSL handshake directly after establishing the TCP/IP connection. + /// + Direct +} + +/// +/// Specifies how to manage GSS encryption. +/// +public enum GssEncryptionMode +{ + /// + /// GSS encryption is disabled. If the server requires GSS encryption, the connection will fail. + /// + Disable, + /// + /// Prefer GSS encrypted connections if the server allows them, but allow connections without GSS encryption. + /// + Prefer, + /// + /// Fail the connection if the server doesn't support GSS encryption. + /// + Require +} + +/// +/// Specifies how to manage channel binding. +/// +public enum ChannelBinding +{ + /// + /// Channel binding is disabled. If the server requires channel binding, the connection will fail. + /// + Disable, + /// + /// Prefer channel binding if the server allows it, but connect without it if not. + /// + Prefer, + /// + /// Fail the connection if the server doesn't support channel binding. 
+ /// + Require +} + /// /// Specifies how the mapping of arrays of /// value types @@ -1865,4 +1823,40 @@ enum ReplicationMode Logical } +/// +/// Specifies which authentication methods are supported. +/// +[Flags] +enum RequireAuthMode +{ + /// + /// Plaintext password. + /// + Password = 1, + /// + /// MD5 hashed password. + /// + MD5 = 2, + /// + /// Kerberos. + /// + GSS = 4, + /// + /// Windows SSPI. + /// + SSPI = 8, + /// + /// SASL. + /// + ScramSHA256 = 16, + /// + /// No authentication exchange. + /// + None = 32, + /// + /// All authentication methods. For internal use. + /// + All = Password | MD5 | GSS | SSPI | ScramSHA256 | None +} + #endregion diff --git a/src/Npgsql/NpgsqlDataAdapter.cs b/src/Npgsql/NpgsqlDataAdapter.cs index 0c8e0822ce..f98f4cca61 100644 --- a/src/Npgsql/NpgsqlDataAdapter.cs +++ b/src/Npgsql/NpgsqlDataAdapter.cs @@ -1,6 +1,7 @@ using System; using System.Data; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; @@ -140,6 +141,7 @@ protected override void OnRowUpdating(RowUpdatingEventArgs value) } // Temporary implementation, waiting for official support in System.Data via https://github.com/dotnet/runtime/issues/22109 + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] internal async Task Fill(DataTable dataTable, bool async, CancellationToken cancellationToken = default) { var command = SelectCommand; @@ -150,17 +152,17 @@ internal async Task Fill(DataTable dataTable, bool async, CancellationToken { originalState = activeConnection.State; if (ConnectionState.Closed == originalState) - await activeConnection.Open(async, cancellationToken); + await activeConnection.Open(async, cancellationToken).ConfigureAwait(false); - var dataReader = await command.ExecuteReader(CommandBehavior.Default, async, cancellationToken); + var dataReader = await command.ExecuteReader(async, 
CommandBehavior.Default, cancellationToken).ConfigureAwait(false); try { - return await Fill(dataTable, dataReader, async, cancellationToken); + return await Fill(dataTable, dataReader, async, cancellationToken).ConfigureAwait(false); } finally { if (async) - await dataReader.DisposeAsync(); + await dataReader.DisposeAsync().ConfigureAwait(false); else dataReader.Dispose(); } @@ -172,6 +174,7 @@ internal async Task Fill(DataTable dataTable, bool async, CancellationToken } } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] async Task Fill(DataTable dataTable, NpgsqlDataReader dataReader, bool async, CancellationToken cancellationToken = default) { dataTable.BeginLoadData(); @@ -193,7 +196,7 @@ async Task Fill(DataTable dataTable, NpgsqlDataReader dataReader, bool asyn var values = new object[count]; - while (async ? await dataReader.ReadAsync(cancellationToken) : dataReader.Read()) + while (async ? await dataReader.ReadAsync(cancellationToken).ConfigureAwait(false) : dataReader.Read()) { dataReader.GetValues(values); dataTable.LoadDataRow(values, true); @@ -210,18 +213,18 @@ async Task Fill(DataTable dataTable, NpgsqlDataReader dataReader, bool asyn #pragma warning disable 1591 -public class NpgsqlRowUpdatingEventArgs : RowUpdatingEventArgs -{ - public NpgsqlRowUpdatingEventArgs(DataRow dataRow, IDbCommand? command, System.Data.StatementType statementType, - DataTableMapping tableMapping) - : base(dataRow, command, statementType, tableMapping) {} -} - -public class NpgsqlRowUpdatedEventArgs : RowUpdatedEventArgs -{ - public NpgsqlRowUpdatedEventArgs(DataRow dataRow, IDbCommand? command, System.Data.StatementType statementType, - DataTableMapping tableMapping) - : base(dataRow, command, statementType, tableMapping) {} -} - -#pragma warning restore 1591 \ No newline at end of file +public class NpgsqlRowUpdatingEventArgs( + DataRow dataRow, + IDbCommand? 
command, + System.Data.StatementType statementType, + DataTableMapping tableMapping) + : RowUpdatingEventArgs(dataRow, command, statementType, tableMapping); + +public class NpgsqlRowUpdatedEventArgs( + DataRow dataRow, + IDbCommand? command, + System.Data.StatementType statementType, + DataTableMapping tableMapping) + : RowUpdatedEventArgs(dataRow, command, statementType, tableMapping); + +#pragma warning restore 1591 diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index c6fc499720..1f75aa012f 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1,4 +1,5 @@ using System; +using System.Buffers; using System.Collections; using System.Collections.Generic; using System.Collections.ObjectModel; @@ -7,24 +8,19 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; -using System.Linq; using System.Runtime.CompilerServices; using System.Runtime.ExceptionServices; -using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.Converters; using Npgsql.PostgresTypes; using Npgsql.Schema; -using Npgsql.Util; using NpgsqlTypes; using static Npgsql.Util.Statics; -#pragma warning disable CA2222 // Do not decrease inherited member visibility namespace Npgsql; /// @@ -34,6 +30,10 @@ namespace Npgsql; public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator #pragma warning restore CA1010 { + const int DbNullSentinel = -1; + static readonly Task TrueTask = Task.FromResult(true); + static readonly Task FalseTask = Task.FromResult(false); + internal NpgsqlCommand Command { get; private set; } = default!; internal NpgsqlConnector Connector { get; } NpgsqlConnection? 
_connection; @@ -44,14 +44,14 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator CommandBehavior _behavior; /// - /// In multiplexing, this is as the sending is managed in the write multiplexing loop, - /// and does not need to be awaited by the reader. + /// The task for writing this command's messages. Awaited on reader cleanup. /// Task? _sendTask; internal ReaderState State = ReaderState.Disposed; internal NpgsqlReadBuffer Buffer = default!; + PgReader PgReader => Buffer.PgReader; /// /// Holds the list of statements being executed by this reader. @@ -63,32 +63,20 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// internal int StatementIndex { get; private set; } - /// - /// The number of columns in the current row - /// - int _numColumns; - /// /// Records, for each column, its starting offset and length in the current row. /// Used only in non-sequential mode. /// - readonly List<(int Offset, int Length)> _columns = new(); + readonly List<(int Offset, int Length)> _columns = []; + int _columnsStartPos; /// - /// The index of the column that we're on, i.e. that has already been parsed, is + /// The index of the column that we're on, i.e. that has already been parsed, /// is memory and can be retrieved. Initialized to -1, which means we're on the column /// count (which comes before the first column). /// int _column; - /// - /// For streaming types (e.g. bytea), holds the byte length of the column. - /// Does not include the length prefix. - /// - internal int ColumnLen; - - internal int PosInColumn; - /// /// The position in the buffer at which the current data row message ends. /// Used only when the row is consumed non-sequentially. @@ -100,15 +88,31 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// Mostly useful for a sequential mode, when the row is already in the buffer. /// Should always be true for the non-sequential mode. 
/// - bool _canConsumeRowNonSequentially; + bool _isRowBuffered; - int _charPos; + /// + /// Gets or sets whether the current row is fully buffered in memory. + /// When , async reads will go through the real async converter path rather than the sync shortcut. + /// + /// Settable for testing purposes. + internal bool IsRowBuffered + { + get => _isRowBuffered; + set => _isRowBuffered = value; + } /// /// The RowDescription message for the current resultset being processed /// internal RowDescriptionMessage? RowDescription; + int ColumnCount => RowDescription!.Count; + + /// + /// Stores the last converter info resolved by column, to speed up repeated reading. + /// + ReadConversionContext[]? ConversionContextCache { get; set; } + ulong? _recordsAffected; /// @@ -124,24 +128,9 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator bool _isSchemaOnly; bool _isSequential; - /// - /// A stream that has been opened on a column. - /// - NpgsqlReadBuffer.ColumnStream? _columnStream; - - /// - /// Used for internal temporary purposes - /// - char[]? _tempCharBuf; - - /// - /// Used to keep track of every unique row this reader object ever traverses. - /// This is used to detect whether nested DbDataReaders are still valid. - /// - internal ulong UniqueRowId; - internal NpgsqlNestedDataReader? CachedFreeNestedDataReader; + long _startTimestamp; readonly ILogger _commandLogger; internal NpgsqlDataReader(NpgsqlConnector connector) @@ -154,8 +143,10 @@ internal void Init( NpgsqlCommand command, CommandBehavior behavior, List statements, + long startTimestamp = 0, Task? 
sendTask = null) { + Debug.Assert(ConversionContextCache is null); Command = command; _connection = command.InternalConnection; _behavior = behavior; @@ -166,6 +157,7 @@ internal void Init( _sendTask = sendTask; State = ReaderState.BetweenResults; _recordsAffected = null; + _startTimestamp = startTimestamp; } #region Read @@ -179,13 +171,8 @@ internal void Init( /// public override bool Read() { - CheckClosedOrDisposed(); - - UniqueRowId++; - var fastRead = TryFastRead(); - return fastRead.HasValue - ? fastRead.Value - : Read(false).GetAwaiter().GetResult(); + ThrowIfClosedOrDisposed(); + return TryRead()?.Result ?? Read(false).GetAwaiter().GetResult(); } /// @@ -197,65 +184,58 @@ public override bool Read() /// A task representing the asynchronous operation. public override Task ReadAsync(CancellationToken cancellationToken) { - CheckClosedOrDisposed(); - - UniqueRowId++; - var fastRead = TryFastRead(); - if (fastRead.HasValue) - return fastRead.Value ? PGUtil.TrueTask : PGUtil.FalseTask; - - using (NoSynchronizationContextScope.Enter()) - return Read(true, cancellationToken); + ThrowIfClosedOrDisposed(); + return TryRead() ?? Read(async: true, cancellationToken); } - bool? TryFastRead() + // This is an optimized execution path that avoids calling any async methods for the (usual) + // case where the next row (or CommandComplete) is already in memory. + Task? TryRead() { - // This is an optimized execution path that avoids calling any async methods for the (usual) - // case where the next row (or CommandComplete) is already in memory. - - if (_behavior.HasFlag(CommandBehavior.SingleRow)) - return null; - switch (State) { case ReaderState.BeforeResult: // First Read() after NextResult. Data row has already been processed. 
State = ReaderState.InResult; - return true; + return TrueTask; case ReaderState.InResult: - if (!_canConsumeRowNonSequentially) - return null; - // We get here, if we're in a non-sequential mode (or the row is already in the buffer) - ConsumeRowNonSequential(); break; - case ReaderState.BetweenResults: - case ReaderState.Consumed: - case ReaderState.Closed: - case ReaderState.Disposed: - return false; + default: + return FalseTask; } - var readBuf = Connector.ReadBuffer; - if (readBuf.ReadBytesLeft < 5) + // We have a special case path for SingleRow. + if (_behavior.HasFlag(CommandBehavior.SingleRow) || !_isRowBuffered) return null; - var messageCode = (BackendMessageCode)readBuf.ReadByte(); - var len = readBuf.ReadInt32() - 4; // Transmitted length includes itself - if (messageCode != BackendMessageCode.DataRow || readBuf.ReadBytesLeft < len) - { - readBuf.ReadPosition -= 5; + + ConsumeBufferedRow(); + + const int headerSize = sizeof(byte) + sizeof(int); + var buffer = Buffer; + var readPosition = buffer.ReadPosition; + var bytesLeft = buffer.FilledBytes - readPosition; + if (bytesLeft < headerSize) + return null; + var messageCode = (BackendMessageCode)buffer.ReadByte(); + var len = buffer.ReadInt32() - sizeof(int); // Transmitted length includes itself + var isDataRow = messageCode is BackendMessageCode.DataRow; + // sizeof(short) is for the number of columns + var sufficientBytes = isDataRow && _isSequential ? headerSize + sizeof(short) : headerSize + len; + if (bytesLeft < sufficientBytes + || !isDataRow && (_statements[StatementIndex].AppendErrorBarrier ?? Command.EnableErrorBarriers) + // Could be an error, let main read handle it. + || Connector.ParseResultSetMessage(buffer, messageCode, len) is not { } msg) + { + buffer.ReadPosition = readPosition; return null; } - - var msg = Connector.ParseServerMessage(readBuf, messageCode, len, false)!; - Debug.Assert(msg.Code == BackendMessageCode.DataRow); ProcessMessage(msg); - return true; + return isDataRow ? 
TrueTask : FalseTask; } async Task Read(bool async, CancellationToken cancellationToken = default) { - var registration = Connector.StartNestedCancellableOperation(cancellationToken); - + using var registration = Connector.StartNestedCancellableOperation(cancellationToken); try { switch (State) @@ -266,11 +246,11 @@ async Task Read(bool async, CancellationToken cancellationToken = default) return true; case ReaderState.InResult: - await ConsumeRow(async); + await ConsumeRow(async).ConfigureAwait(false); if (_behavior.HasFlag(CommandBehavior.SingleRow)) { // TODO: See optimization proposal in #410 - await Consume(async); + await Consume(async).ConfigureAwait(false); return false; } break; @@ -281,10 +261,11 @@ async Task Read(bool async, CancellationToken cancellationToken = default) case ReaderState.Disposed: return false; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return false; } - var msg = await ReadMessage(async); + var msg = await ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { @@ -296,7 +277,7 @@ async Task Read(bool async, CancellationToken cancellationToken = default) case BackendMessageCode.EmptyQueryResponse: ProcessMessage(msg); if (_statements[StatementIndex].AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); return false; default: @@ -305,13 +286,11 @@ async Task Read(bool async, CancellationToken cancellationToken = default) } catch { - State = ReaderState.Consumed; + // Break may have progressed the reader already. 
+ if (State is not ReaderState.Closed) + State = ReaderState.Consumed; throw; } - finally - { - registration.Dispose(); - } } ValueTask ReadMessage(bool async) @@ -320,11 +299,11 @@ ValueTask ReadMessage(bool async) static async ValueTask ReadMessageSequential(NpgsqlConnector connector, bool async) { - var msg = await connector.ReadMessage(async, DataRowLoadingMode.Sequential); + var msg = await connector.ReadMessage(async, DataRowLoadingMode.Sequential).ConfigureAwait(false); if (msg.Code == BackendMessageCode.DataRow) { // Make sure that the datarow's column count is already buffered - await connector.ReadBuffer.Ensure(2, async); + await connector.ReadBuffer.Ensure(2, async).ConfigureAwait(false); return msg; } return msg; @@ -339,8 +318,12 @@ static async ValueTask ReadMessageSequential(NpgsqlConnector co /// Advances the reader to the next result when reading the results of a batch of statements. /// /// - public override bool NextResult() => (_isSchemaOnly ? NextResultSchemaOnly(false) : NextResult(false)) - .GetAwaiter().GetResult(); + public override bool NextResult() + { + ThrowIfClosedOrDisposed(); + return (_isSchemaOnly ? NextResultSchemaOnly(false) : NextResult(false)) + .GetAwaiter().GetResult(); + } /// /// This is the asynchronous version of NextResult. @@ -351,8 +334,7 @@ public override bool NextResult() => (_isSchemaOnly ? NextResultSchemaOnly(false /// A task representing the asynchronous operation. public override Task NextResultAsync(CancellationToken cancellationToken) { - using var _ = NoSynchronizationContextScope.Enter(); - + ThrowIfClosedOrDisposed(); return _isSchemaOnly ? 
NextResultSchemaOnly(async: true, cancellationToken: cancellationToken) : NextResult(async: true, cancellationToken: cancellationToken); @@ -361,104 +343,75 @@ public override Task NextResultAsync(CancellationToken cancellationToken) /// /// Internal implementation of NextResult /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] async Task NextResult(bool async, bool isConsuming = false, CancellationToken cancellationToken = default) { - CheckClosedOrDisposed(); - - IBackendMessage msg; Debug.Assert(!_isSchemaOnly); - - using var registration = isConsuming ? default : Connector.StartNestedCancellableOperation(cancellationToken); + if (State is ReaderState.Consumed) + return false; try { + using var registration = isConsuming ? default : Connector.StartNestedCancellableOperation(cancellationToken); // If we're in the middle of a resultset, consume it - switch (State) - { - case ReaderState.BeforeResult: - case ReaderState.InResult: - await ConsumeRow(async); - while (true) - { - var completedMsg = await Connector.ReadMessage(async, DataRowLoadingMode.Skip); - switch (completedMsg.Code) - { - case BackendMessageCode.CommandComplete: - case BackendMessageCode.EmptyQueryResponse: - ProcessMessage(completedMsg); - - if (_statements[StatementIndex].AppendErrorBarrier ?? 
Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); - - break; - - default: - continue; - } - - break; - } - - break; + if (State is ReaderState.BeforeResult or ReaderState.InResult) + await ConsumeResultSet(async).ConfigureAwait(false); - case ReaderState.BetweenResults: - break; - - case ReaderState.Consumed: - case ReaderState.Closed: - case ReaderState.Disposed: - return false; - default: - throw new ArgumentOutOfRangeException(); - } + Debug.Assert(State is ReaderState.BetweenResults); - Debug.Assert(State == ReaderState.BetweenResults); _hasRows = false; - if (_behavior.HasFlag(CommandBehavior.SingleResult) && StatementIndex == 0 && !isConsuming) + var statements = _statements; + var statementIndex = StatementIndex; + if (statementIndex >= 0) { - await Consume(async); - return false; + if (RowDescription is { } description && statements[statementIndex].IsPrepared && ConversionContextCache is { } cache) + description.SetColumnInfoCache(new(cache, 0, ColumnCount)); + + if (statementIndex is 0 && _behavior.HasFlag(CommandBehavior.SingleResult) && !isConsuming) + { + await Consume(async).ConfigureAwait(false); + return false; + } } // We are now at the end of the previous result set. Read up to the next result set, if any. 
// Non-prepared statements receive ParseComplete, BindComplete, DescriptionRow/NoData, // prepared statements receive only BindComplete - for (StatementIndex++; StatementIndex < _statements.Count; StatementIndex++) + for (statementIndex = ++StatementIndex; statementIndex < statements.Count; statementIndex = ++StatementIndex) { - var statement = _statements[StatementIndex]; + var statement = statements[statementIndex]; + IBackendMessage msg; if (statement.TryGetPrepared(out var preparedStatement)) { - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); RowDescription = preparedStatement.Description; } else // Non-prepared/preparing flow { - var pStatement = statement.PreparedStatement; - if (pStatement != null) + preparedStatement = statement.PreparedStatement; + if (preparedStatement != null) { - Debug.Assert(!pStatement.IsPrepared); - if (pStatement.StatementBeingReplaced != null) + Debug.Assert(!preparedStatement.IsPrepared); + if (preparedStatement.StatementBeingReplaced != null) { - Expect(await Connector.ReadMessage(async), Connector); - pStatement.StatementBeingReplaced.CompleteUnprepare(); - pStatement.StatementBeingReplaced = null; + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); + preparedStatement.StatementBeingReplaced.CompleteUnprepare(); + preparedStatement.StatementBeingReplaced = null; } } - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); if (statement.IsPreparing) { - pStatement!.State = PreparedState.Prepared; + preparedStatement!.State = PreparedState.Prepared; Connector.PreparedStatementManager.NumPrepared++; statement.IsPreparing = false; } - Expect(await Connector.ReadMessage(async), Connector); - msg = await Connector.ReadMessage(async); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); + msg = await 
Connector.ReadMessage(async).ConfigureAwait(false); RowDescription = statement.Description = msg.Code switch { @@ -466,7 +419,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // RowDescription messages are cached on the connector, but if we're auto-preparing, we need to // clone our own copy which will last beyond the lifetime of this invocation. - BackendMessageCode.RowDescription => pStatement == null + BackendMessageCode.RowDescription => preparedStatement == null ? (RowDescriptionMessage)msg : ((RowDescriptionMessage)msg).Clone(), @@ -474,12 +427,25 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo }; } - if (RowDescription == null) + if (RowDescription is not null) + { + if (ConversionContextCache?.Length >= ColumnCount) + Array.Clear(ConversionContextCache, 0, ColumnCount); + else + { + if (ConversionContextCache is { } cache) + ArrayPool.Shared.Return(cache, clearArray: true); + ConversionContextCache = ArrayPool.Shared.Rent(ColumnCount); + } + if (statement.IsPrepared) + RowDescription.LoadColumnInfoCache(Connector.SerializerOptions, ConversionContextCache); + } + else { // Statement did not generate a resultset (e.g. INSERT) // Read and process its completion message and move on to the next statement - - msg = await ReadMessage(async); + // No need to read sequentially as it's not a DataRow + msg = await Connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.CommandComplete: @@ -489,6 +455,9 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo throw Connector.Break(new NotSupportedException( "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql. 
" + "If you are trying to execute a SQL script created by pg_dump, pass the '--inserts' switch to disable generating COPY statements.")); + case BackendMessageCode.CopyOutResponse: + throw Connector.Break(new NotSupportedException( + "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql.")); default: throw Connector.UnexpectedMessageReceived(msg.Code); } @@ -496,43 +465,82 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo ProcessMessage(msg); if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); continue; } - if (!Command.IsWrappedByBatch && StatementIndex == 0 && Command.Parameters.HasOutputParameters) + if ((Command.WrappingBatch is not null || StatementIndex is 0) && Command.InternalBatchCommands[StatementIndex] is { HasOutputParameters: true } command) { - // If output parameters are present and this is the first row of the first resultset, + // If output parameters are present and this is the first row of the resultset, // we must always read it in non-sequential mode because it will be traversed twice (once // here for the parameters, then as a regular row). - msg = await Connector.ReadMessage(async); + msg = await Connector.ReadMessage(async, dataRowLoadingMode: DataRowLoadingMode.NonSequential).ConfigureAwait(false); ProcessMessage(msg); if (msg.Code == BackendMessageCode.DataRow) - PopulateOutputParameters(); + { + Debug.Assert(RowDescription != null); + Debug.Assert(State == ReaderState.BeforeResult); + + try + { + // Temporarily set our state to InResult and non-sequential to allow us to read the values, and in any order. 
+ var isSequential = _isSequential; + var currentPosition = Buffer.ReadPosition; + State = ReaderState.InResult; + _isSequential = false; + try + { + command.PopulateOutputParameters(this, _commandLogger); + + // On success we want to revert any row and column state for the user to be able to read the same row again. + if (async) + await PgReader.CommitAsync().ConfigureAwait(false); + else + PgReader.Commit(); + + State = ReaderState.BeforeResult; // Set the state back + Buffer.ReadPosition = currentPosition; // Restore position + _column = -1; + } + finally + { + // To be on the safe side we always revert this CommandBehavior state change, including on failure. + _isSequential = isSequential; + } + } + catch (Exception e) + { + // TODO: ideally we should flow down to global exception filter and consume there + await Consume(async, firstException: e).ConfigureAwait(false); + throw; + } + } } else { - msg = await ReadMessage(async); + msg = await ReadMessage(async).ConfigureAwait(false); ProcessMessage(msg); } switch (msg.Code) { case BackendMessageCode.DataRow: + Connector.State = ConnectorState.Fetching; return true; case BackendMessageCode.CommandComplete: if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); return true; default: - throw Connector.UnexpectedMessageReceived(msg.Code); + Connector.UnexpectedMessageReceived(msg.Code); + break; } } // There are no more queries, we're done. Read the RFQ. - if (_statements.Count == 0 || !(_statements[_statements.Count - 1].AppendErrorBarrier ?? Command.EnableErrorBarriers)) - Expect(await Connector.ReadMessage(async), Connector); + if (_statements.Count is 0 || !(_statements[^1].AppendErrorBarrier ?? 
Command.EnableErrorBarriers)) + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); State = ReaderState.Consumed; RowDescription = null; @@ -545,15 +553,14 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo var statement = _statements[StatementIndex]; // Reference the triggering statement from the exception - postgresException.BatchCommand = statement; + if (Connector.Settings.IncludeFailedBatchedCommand) + postgresException.BatchCommand = statement; // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may // already be using the recycled instance. - if (!Command.IsWrappedByBatch) - { - Command.IsCached = false; - } + // TODO: we probably should do than even if it's not PostgresException (error from PopulateOutputParameters) + Command.IsCacheable = false; // If the schema of a table changes after a statement is prepared on that table, PostgreSQL errors with // 0A000: cached plan must not change result type. 0A000 seems like a non-specific code, but it's very unlikely the @@ -581,6 +588,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // However, if the command has error barrier, we now have to consume results from the commands after it (unless it's the // last one). // Note that Consume calls NextResult (this method) recursively, the isConsuming flag tells us we're in this mode. + // TODO: We might as well call Consume on every command (even the last one) to make sure we do read every single message until RFQ + // in case we get an exception in the middle of NextResult if ((statement.AppendErrorBarrier ?? 
Command.EnableErrorBarriers) && StatementIndex < _statements.Count - 1) { if (isConsuming) @@ -596,61 +605,46 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // We provide Consume with the first exception which we've just caught. // If it encounters other exceptions while consuming the rest of the result set, it will raise an AggregateException, // otherwise it will rethrow this first exception. - await Consume(async, firstException: e); + await Consume(async, firstException: e).ConfigureAwait(false); break; // Never reached, Consume always throws above } } } - State = ReaderState.Consumed; + // Break may have progressed the reader already. + if (State is not ReaderState.Closed) + State = ReaderState.Consumed; throw; } - } - void PopulateOutputParameters() - { - // The first row in a stored procedure command that has output parameters needs to be traversed twice - - // once for populating the output parameters and once for the actual result set traversal. So in this - // case we can't be sequential. 
- Debug.Assert(Command.Parameters.Any(p => p.IsOutputDirection)); - Debug.Assert(StatementIndex == 0); - Debug.Assert(RowDescription != null); - Debug.Assert(State == ReaderState.BeforeResult); - - var currentPosition = Buffer.ReadPosition; - - // Temporarily set our state to InResult to allow us to read the values - State = ReaderState.InResult; - - var pending = new Queue(); - var taken = new List(); - for (var i = 0; i < FieldCount; i++) + async ValueTask ConsumeResultSet(bool async) { - if (Command.Parameters.TryGetValue(GetName(i), out var p) && p.IsOutputDirection) + await ConsumeRow(async).ConfigureAwait(false); + while (true) { - p.Value = GetValue(i); - taken.Add(p); - } - else - pending.Enqueue(GetValue(i)); - } + var completedMsg = await Connector.ReadMessage(async, DataRowLoadingMode.Skip).ConfigureAwait(false); + switch (completedMsg.Code) + { + case BackendMessageCode.CommandComplete: + case BackendMessageCode.EmptyQueryResponse: + ProcessMessage(completedMsg); - // Not sure where this odd behavior comes from: all output parameters which did not get matched by - // name now get populated with column values which weren't matched. Keeping this for backwards compat, - // opened #2252 for investigation. - foreach (var p in Command.Parameters.Where(p => p.IsOutputDirection && !taken.Contains(p))) - { - if (pending.Count == 0) - break; - p.Value = pending.Dequeue(); - } + var statement = _statements[StatementIndex]; + if (statement.IsPrepared && ConversionContextCache is not null) + RowDescription!.SetColumnInfoCache(new(ConversionContextCache, 0, ColumnCount)); - State = ReaderState.BeforeResult; // Set the state back - Buffer.ReadPosition = currentPosition; // Restore position + if (statement.AppendErrorBarrier ?? 
Command.EnableErrorBarriers) + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); - _column = -1; - ColumnLen = -1; - PosInColumn = 0; + break; + default: + // TODO if we hit an ErrorResponse here (PG doesn't do this *today*) we should probably throw. + continue; + } + + break; + } + } } /// @@ -660,25 +654,13 @@ void PopulateOutputParameters() async Task NextResultSchemaOnly(bool async, bool isConsuming = false, CancellationToken cancellationToken = default) { Debug.Assert(_isSchemaOnly); + if (State is ReaderState.Consumed) + return false; using var registration = isConsuming ? default : Connector.StartNestedCancellableOperation(cancellationToken); try { - switch (State) - { - case ReaderState.BeforeResult: - case ReaderState.InResult: - case ReaderState.BetweenResults: - break; - case ReaderState.Consumed: - case ReaderState.Closed: - case ReaderState.Disposed: - return false; - default: - throw new ArgumentOutOfRangeException(); - } - for (StatementIndex++; StatementIndex < _statements.Count; StatementIndex++) { var statement = _statements[StatementIndex]; @@ -696,13 +678,13 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc Debug.Assert(!pStatement.IsPrepared); if (pStatement.StatementBeingReplaced != null) { - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); pStatement.StatementBeingReplaced.CompleteUnprepare(); pStatement.StatementBeingReplaced = null; } } - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); if (statement.IsPreparing) { @@ -711,8 +693,8 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc statement.IsPreparing = false; } - Expect(await Connector.ReadMessage(async), Connector); - var msg = await Connector.ReadMessage(async); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), 
Connector); + var msg = await Connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.NoData: @@ -720,45 +702,55 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc break; case BackendMessageCode.RowDescription: // We have a resultset - RowDescription = _statements[StatementIndex].Description = (RowDescriptionMessage)msg; + // RowDescription messages are cached on the connector, but if we're auto-preparing, we need to + // clone our own copy which will last beyond the lifetime of this invocation. + RowDescription = _statements[StatementIndex].Description = preparedStatement == null + ? (RowDescriptionMessage)msg + : ((RowDescriptionMessage)msg).Clone(); Command.FixupRowDescription(RowDescription, StatementIndex == 0); break; default: throw Connector.UnexpectedMessageReceived(msg.Code); } + + var forall = true; + for (var i = StatementIndex + 1; i < _statements.Count; i++) + if (!_statements[i].IsPrepared) + { + forall = false; + break; + } + // There are no more queries, we're done. Read to the RFQ. + if (forall) + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); } // Found a resultset - if (RowDescription != null) + if (RowDescription is not null) return true; } - // There are no more queries, we're done. Read to the RFQ. - if (!_statements.All(s => s.IsPrepared)) - { - Expect(await Connector.ReadMessage(async), Connector); - RowDescription = null; - State = ReaderState.Consumed; - } - + State = ReaderState.Consumed; + RowDescription = null; return false; } catch (Exception e) { - State = ReaderState.Consumed; + // Break may have progressed the reader already. 
+ if (State is not ReaderState.Closed) + State = ReaderState.Consumed; // Reference the triggering statement from the exception if (e is PostgresException postgresException && StatementIndex >= 0 && StatementIndex < _statements.Count) { - postgresException.BatchCommand = _statements[StatementIndex]; + // Reference the triggering statement from the exception + if (Connector.Settings.IncludeFailedBatchedCommand) + postgresException.BatchCommand = _statements[StatementIndex]; - // Prevent the command or batch from by recycled (by the connection) when it's disposed. This is important since + // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may // already be using the recycled instance. - if (!Command.IsWrappedByBatch) - { - Command.IsCached = false; - } + Command.IsCacheable = false; } // An error means all subsequent statements were skipped by PostgreSQL. 
@@ -784,77 +776,44 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc internal void ProcessMessage(IBackendMessage msg) { - switch (msg.Code) + if (msg.Code is not BackendMessageCode.DataRow) { - case BackendMessageCode.DataRow: - ProcessDataRowMessage((DataRowMessage)msg); - return; - - case BackendMessageCode.CommandComplete: - var completed = (CommandCompleteMessage)msg; - switch (completed.StatementType) - { - case StatementType.Update: - case StatementType.Insert: - case StatementType.Delete: - case StatementType.Copy: - case StatementType.Move: - case StatementType.Merge: - if (!_recordsAffected.HasValue) - _recordsAffected = 0; - _recordsAffected += completed.Rows; - break; - } - - _statements[StatementIndex].ApplyCommandComplete(completed); - goto case BackendMessageCode.EmptyQueryResponse; - - case BackendMessageCode.EmptyQueryResponse: - State = ReaderState.BetweenResults; + HandleUncommon(msg); return; - - default: - throw new Exception("Received unexpected backend message of type " + msg.Code); } - } - - void ProcessDataRowMessage(DataRowMessage msg) - { - Connector.State = ConnectorState.Fetching; + var dataRow = (DataRowMessage)msg; // The connector's buffer can actually change between DataRows: // If a large DataRow exceeding the connector's current read buffer arrives, and we're // reading in non-sequential mode, a new oversize buffer is allocated. We thus have to // recapture the connector's buffer on each new DataRow. 
// Note that this can happen even in sequential mode, if the row description message is big // (see #2003) - Buffer = Connector.ReadBuffer; + if (!ReferenceEquals(Buffer, Connector.ReadBuffer)) + Buffer = Connector.ReadBuffer; - _hasRows = true; - _column = -1; - ColumnLen = -1; - PosInColumn = 0; + Buffer.PgReader.StreamCanSeek = !_isSequential; // We assume that the row's number of columns is identical to the description's - _numColumns = Buffer.ReadInt16(); - Debug.Assert(_numColumns == RowDescription!.Count, - $"Row's number of columns ({_numColumns}) differs from the row description's ({RowDescription.Count})"); - - _dataMsgEnd = Buffer.ReadPosition + msg.Length - 2; - _canConsumeRowNonSequentially = Buffer.ReadBytesLeft >= msg.Length - 2; + var numColumns = Buffer.ReadInt16(); + if (ColumnCount != numColumns) + ThrowHelper.ThrowArgumentException($"Row's number of columns ({numColumns}) differs from the row description's ({ColumnCount})"); + + var readPosition = Buffer.ReadPosition; + var msgRemainder = dataRow.Length - sizeof(short); + _dataMsgEnd = readPosition + msgRemainder; + _columnsStartPos = readPosition; + _isRowBuffered = msgRemainder <= Buffer.FilledBytes - readPosition; + Debug.Assert(_isRowBuffered || _isSequential); + _column = -1; - if (!_isSequential) - { - Debug.Assert(_canConsumeRowNonSequentially); - // Initialize our columns array with the offset and length of the first column + if (_columns.Count > 0) _columns.Clear(); - var len = Buffer.ReadInt32(); - _columns.Add((Buffer.ReadPosition, len)); - } switch (State) { case ReaderState.BetweenResults: + _hasRows = true; State = ReaderState.BeforeResult; break; case ReaderState.BeforeResult: @@ -863,14 +822,45 @@ void ProcessDataRowMessage(DataRowMessage msg) case ReaderState.InResult: break; default: - throw Connector.UnexpectedMessageReceived(BackendMessageCode.DataRow); + Connector.UnexpectedMessageReceived(BackendMessageCode.DataRow); + break; + } + + 
[MethodImpl(MethodImplOptions.NoInlining)] + void HandleUncommon(IBackendMessage msg) + { + switch (msg.Code) + { + case BackendMessageCode.CommandComplete: + var completed = (CommandCompleteMessage)msg; + switch (completed.StatementType) + { + case StatementType.Update: + case StatementType.Insert: + case StatementType.Delete: + case StatementType.Copy: + case StatementType.Move: + case StatementType.Merge: + _recordsAffected ??= 0; + _recordsAffected += completed.Rows; + break; + } + + _statements[StatementIndex].ApplyCommandComplete(completed); + State = ReaderState.BetweenResults; + break; + case BackendMessageCode.EmptyQueryResponse: + State = ReaderState.BetweenResults; + break; + default: + Connector.UnexpectedMessageReceived(msg.Code); + break; + } } } #endregion - void Cancel() => Connector.PerformPostgresCancellation(); - /// /// Gets a value indicating the depth of nesting for the current row. Always returns zero. /// @@ -879,7 +869,7 @@ void ProcessDataRowMessage(DataRowMessage msg) /// /// Gets a value indicating whether the data reader is closed. /// - public override bool IsClosed => State == ReaderState.Closed || State == ReaderState.Disposed; + public override bool IsClosed => State is ReaderState.Closed or ReaderState.Disposed; /// /// Gets the number of rows changed, inserted, or deleted by execution of the SQL statement. @@ -915,18 +905,26 @@ public override int RecordsAffected /// which exposes an aggregation across all statements. /// [Obsolete("Use the new DbBatch API")] - public IReadOnlyList Statements => _statements.AsReadOnly(); + public IReadOnlyList Statements + { + get + { + ThrowIfClosedOrDisposed(); + return _statements.AsReadOnly(); + } + } /// /// Gets a value that indicates whether this DbDataReader contains one or more rows. 
/// public override bool HasRows - => State switch + { + get { - ReaderState.Closed => throw new InvalidOperationException("Invalid attempt to call HasRows when reader is closed."), - ReaderState.Disposed => throw new ObjectDisposedException(nameof(NpgsqlDataReader)), - _ => _hasRows - }; + ThrowIfClosedOrDisposed(); + return _hasRows; + } + } /// /// Indicates whether the reader is currently positioned on a row, i.e. whether reading a @@ -935,7 +933,14 @@ public override bool HasRows /// return true even if attempting to read a column will fail, e.g. before /// has been called /// - public bool IsOnRow => State == ReaderState.InResult; + public bool IsOnRow + { + get + { + ThrowIfClosedOrDisposed(); + return State is ReaderState.InResult; + } + } /// /// Gets the name of the column, given the zero-based column ordinal. @@ -951,7 +956,7 @@ public override int FieldCount { get { - CheckClosedOrDisposed(); + ThrowIfClosedOrDisposed(); return RowDescription?.Count ?? 0; } } @@ -968,20 +973,29 @@ async Task Consume(bool async, Exception? firstException = null) // Skip over the other result sets. Note that this does tally records affected from CommandComplete messages, and properly sets // state for auto-prepared statements - while (true) + // + // The only exception is when the connector is broken (which can happen in the middle of consuming) + // As then there is no point in going forward. + // An exception to the exception above is when connector is concurrently closed while + // the reader is still going over the result set. + // While this is undefined behavior and user error, we should try to at least do our best to not loop indefinitely. + // + // While we can also check our local state (State == Closed) + // It's probably better to rely on connector since it's private and its state can't be changed + while (Connector.IsConnected) { try { if (!(_isSchemaOnly - ? 
await NextResultSchemaOnly(async, isConsuming: true) - : await NextResult(async, isConsuming: true))) + ? await NextResultSchemaOnly(async, isConsuming: true).ConfigureAwait(false) + : await NextResult(async, isConsuming: true).ConfigureAwait(false))) { break; } } catch (Exception e) { - exceptions ??= new(); + exceptions ??= []; exceptions.Add(e); } } @@ -1014,11 +1028,10 @@ protected override void Dispose(bool disposing) catch (Exception ex) { // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set - // to Disposed in Close above; in multiplexing, we also unbind the connector (with its reader), and at that point it can be used - // by other consumers. Therefore, we only set the state fo Disposed if the exception *wasn't* a PostgresException. + // to Disposed in Close above. Therefore, we only set the state to Disposed if the exception *wasn't* a PostgresException. if (!(ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && - aggregateException.InnerExceptions.All(e => e is PostgresException))) + AllPostgresExceptions(aggregateException.InnerExceptions))) { State = ReaderState.Disposed; } @@ -1034,43 +1047,38 @@ protected override void Dispose(bool disposing) /// /// Releases the resources used by the . 
/// -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else - public override ValueTask DisposeAsync() -#endif + public override async ValueTask DisposeAsync() { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncCore(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - async ValueTask DisposeAsyncCore() + try { - try - { - await Close(connectionClosing: false, async: true, isDisposing: true); - } - catch (Exception ex) - { - // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set - // to Disposed in Close above; in multiplexing, we also unbind the connector (with its reader), and at that point it can be used - // by other consumers. Therefore, we only set the state fo Disposed if the exception *wasn't* a PostgresException. - if (!(ex is PostgresException || - ex is NpgsqlException { InnerException: AggregateException aggregateException } && - aggregateException.InnerExceptions.All(e => e is PostgresException))) - { - State = ReaderState.Disposed; - } - - throw; - } - finally + await Close(connectionClosing: false, async: true, isDisposing: true).ConfigureAwait(false); + } + catch (Exception ex) + { + // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set + // to Disposed in Close above. Therefore, we only set the state to Disposed if the exception *wasn't* a PostgresException. 
+ if (!(ex is PostgresException || + ex is NpgsqlException { InnerException: AggregateException aggregateException } && + AllPostgresExceptions(aggregateException.InnerExceptions))) { - Command.TraceCommandStop(); + State = ReaderState.Disposed; } + throw; + } + finally + { + Command.TraceCommandStop(); } } + static bool AllPostgresExceptions(ReadOnlyCollection collection) + { + foreach (var exception in collection) + if (exception is not PostgresException) + return false; + return true; + } + /// /// Closes the reader, allowing a new command to be executed. /// @@ -1079,17 +1087,10 @@ async ValueTask DisposeAsyncCore() /// /// Closes the reader, allowing a new command to be executed. /// -#if NETSTANDARD2_0 - public Task CloseAsync() -#else public override Task CloseAsync() -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Close(connectionClosing: false, async: true, isDisposing: false); - } + => Close(async: true, connectionClosing: false, isDisposing: false); - internal async Task Close(bool connectionClosing, bool async, bool isDisposing) + internal async Task Close(bool async, bool connectionClosing, bool isDisposing) { if (State is ReaderState.Closed or ReaderState.Disposed) { @@ -1111,20 +1112,20 @@ internal async Task Close(bool connectionClosing, bool async, bool isDisposing) { try { - await Consume(async); + await Consume(async).ConfigureAwait(false); } - catch (Exception ex) when (ex is OperationCanceledException or NpgsqlException { InnerException : TimeoutException }) + catch (Exception ex) when (ex is OperationCanceledException or NpgsqlException { InnerException: TimeoutException }) { // Timeout/cancellation - completely normal, consume has basically completed. 
} catch (Exception ex) when ( ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && - aggregateException.InnerExceptions.All(e => e is PostgresException)) + AllPostgresExceptions(aggregateException.InnerExceptions)) { // In the case of a PostgresException (or multiple ones, if we have error barriers), the connection is fine and consume // has basically completed. Defer throwing the exception until Cleanup is complete. - await Cleanup(async, connectionClosing, isDisposing); + await Cleanup(async, connectionClosing, isDisposing).ConfigureAwait(false); throw; } catch @@ -1146,66 +1147,73 @@ ex is PostgresException || throw new ArgumentOutOfRangeException(); } - await Cleanup(async, connectionClosing, isDisposing); + await Cleanup(async, connectionClosing, isDisposing).ConfigureAwait(false); } internal async Task Cleanup(bool async, bool connectionClosing = false, bool isDisposing = false) { LogMessages.ReaderCleanup(_commandLogger, Connector.Id); - // If multiplexing isn't on, _sendTask contains the task for the writing of this command. + // _sendTask contains the task for the writing of this command. // Make sure that this task, which may have executed asynchronously and in parallel with the reading, - // has completed, throwing any exceptions it generated. - // Note: if the following is removed, mysterious concurrent connection usage errors start happening - // on .NET Framework. - if (_sendTask != null) - { - try + // has completed, throwing any exceptions it generated. If we don't do this, there's the possibility of a race condition where the + // user executes a new command after reader.Dispose() returns, but some additional write stuff is still finishing up from the last + // command. 
+ if (_sendTask is { Status: not TaskStatus.RanToCompletion }) + { + // If the connector is broken, we have no reason to wait for the sendTask to complete + // as we're not going to send anything else over it + // and that can lead to deadlocks (concurrent write and read failure, see #4804) + if (Connector.IsBroken) { - if (async) - await _sendTask; - else - _sendTask.GetAwaiter().GetResult(); + // Prevent unobserved Task notifications by observing the failed Task exception. + _ = _sendTask.ContinueWith(t => _ = t.Exception, CancellationToken.None, TaskContinuationOptions.OnlyOnFaulted, TaskScheduler.Current); } - catch (Exception e) + else { - // TODO: think of a better way to handle exceptions, see #1323 and #3163 - _commandLogger.LogDebug(e, "Exception caught while sending the request", Connector.Id); - } - } - + try + { + if (async) + await _sendTask.ConfigureAwait(false); + else + _sendTask.GetAwaiter().GetResult(); + } + catch (Exception e) + { + // TODO: think of a better way to handle exceptions, see #1323 and #3163 + _commandLogger.LogDebug(e, "Exception caught while sending the request", Connector.Id); + } + } + } + + if (ConversionContextCache is { } cache) + { + ConversionContextCache = null; + ArrayPool.Shared.Return(cache, clearArray: true); + } + + // Drop any reference to a potential oversized buffer. + Buffer = null!; + // Reset reader so the next command doesn't inherit our setup. 
+ if (!Connector.IsBroken) + Connector.ReadBuffer.PgReader.Reset(); + State = ReaderState.Closed; Command.State = CommandState.Idle; Connector.CurrentReader = null; if (_commandLogger.IsEnabled(LogLevel.Information)) Command.LogExecutingCompleted(Connector, executing: false); NpgsqlEventSource.Log.CommandStop(); + Connector.DataSource.MetricsReporter.ReportCommandStop(_startTimestamp); Connector.EndUserAction(); - // The reader shouldn't be unbound, if we're disposing - so the state is set prematurely if (isDisposing) State = ReaderState.Disposed; - if (_connection?.ConnectorBindingScope == ConnectorBindingScope.Reader) - { - UnbindIfNecessary(); - - // TODO: Refactor... Use proper scope - _connection.Connector = null; - Connector.Connection = null; - _connection.ConnectorBindingScope = ConnectorBindingScope.None; - - // If the reader is being closed as part of the connection closing, we don't apply - // the reader's CommandBehavior.CloseConnection - if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) - _connection.Close(); - - Connector.ReaderCompleted.SetResult(null); - } - else if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) + if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) { Debug.Assert(_connection is not null); - _connection.Close(); + await _connection.Close(async).ConfigureAwait(false); } if (ReaderClosed != null) @@ -1224,84 +1232,84 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD /// /// The zero-based column ordinal. /// The value of the specified column. - public override bool GetBoolean(int ordinal) => GetFieldValue(ordinal); + public override bool GetBoolean(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a byte. /// /// The zero-based column ordinal. /// The value of the specified column. 
- public override byte GetByte(int ordinal) => GetFieldValue(ordinal); + public override byte GetByte(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a single character. /// /// The zero-based column ordinal. /// The value of the specified column. - public override char GetChar(int ordinal) => GetFieldValue(ordinal); + public override char GetChar(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a 16-bit signed integer. /// /// The zero-based column ordinal. /// The value of the specified column. - public override short GetInt16(int ordinal) => GetFieldValue(ordinal); + public override short GetInt16(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a 32-bit signed integer. /// /// The zero-based column ordinal. /// The value of the specified column. - public override int GetInt32(int ordinal) => GetFieldValue(ordinal); + public override int GetInt32(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a 64-bit signed integer. /// /// The zero-based column ordinal. /// The value of the specified column. - public override long GetInt64(int ordinal) => GetFieldValue(ordinal); + public override long GetInt64(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a object. /// /// The zero-based column ordinal. /// The value of the specified column. - public override DateTime GetDateTime(int ordinal) => GetFieldValue(ordinal); + public override DateTime GetDateTime(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as an instance of . /// /// The zero-based column ordinal. /// The value of the specified column. 
- public override string GetString(int ordinal) => GetFieldValue(ordinal); + public override string GetString(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a object. /// /// The zero-based column ordinal. /// The value of the specified column. - public override decimal GetDecimal(int ordinal) => GetFieldValue(ordinal); + public override decimal GetDecimal(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a double-precision floating point number. /// /// The zero-based column ordinal. /// The value of the specified column. - public override double GetDouble(int ordinal) => GetFieldValue(ordinal); + public override double GetDouble(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a single-precision floating point number. /// /// The zero-based column ordinal. /// The value of the specified column. - public override float GetFloat(int ordinal) => GetFieldValue(ordinal); + public override float GetFloat(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a globally-unique identifier (GUID). /// /// The zero-based column ordinal. /// The value of the specified column. - public override Guid GetGuid(int ordinal) => GetFieldValue(ordinal); + public override Guid GetGuid(int ordinal) => GetFieldValueCore(ordinal); /// /// Populates an array of objects with the column values of the current row. @@ -1310,11 +1318,10 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD /// The number of instances of in the array. 
public override int GetValues(object[] values) { - if (values == null) - throw new ArgumentNullException(nameof(values)); - CheckResultSet(); + ThrowIfNotInResult(); + ArgumentNullException.ThrowIfNull(values); - var count = Math.Min(FieldCount, values.Length); + var count = Math.Min(ColumnCount, values.Length); for (var i = 0; i < count; i++) values[i] = GetValue(i); return count; @@ -1335,57 +1342,61 @@ public override int GetValues(object[] values) /// Gets the value of the specified column as a TimeSpan, /// /// - /// PostgreSQL's interval type has has a resolution of 1 microsecond and ranges from + /// PostgreSQL's interval type has a resolution of 1 microsecond and ranges from /// -178000000 to 178000000 years, while .NET's TimeSpan has a resolution of 100 nanoseconds /// and ranges from roughly -29247 to 29247 years. /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html /// /// The zero-based column ordinal. /// The value of the specified column. - public TimeSpan GetTimeSpan(int ordinal) => GetFieldValue(ordinal); + public TimeSpan GetTimeSpan(int ordinal) => GetFieldValueCore(ordinal); /// protected override DbDataReader GetDbDataReader(int ordinal) => GetData(ordinal); /// /// Returns a nested data reader for the requested column. - /// The column type must be a record or a to Npgsql known composite type, or an array thereof. + /// The column type must be a record or a Npgsql known composite type, or an array thereof. /// Currently only supported in non-sequential mode. /// /// The zero-based column ordinal. /// A data reader. public new NpgsqlNestedDataReader GetData(int ordinal) { - var field = CheckRowAndGetField(ordinal); + ThrowIfNotInResult(); + var field = RowDescription[ordinal]; + if (_isSequential) + ThrowHelper.ThrowNotSupportedException("GetData() not supported in sequential mode."); + var type = field.PostgresType; var isArray = type is PostgresArrayType; var elementType = isArray ? 
((PostgresArrayType)type).Element : type; var compositeType = elementType as PostgresCompositeType; - if (elementType.InternalName != "record" && compositeType == null) - throw new InvalidCastException("GetData() not supported for type " + field.TypeDisplayName); + if (field.DataFormat is DataFormat.Text || (elementType.InternalName != "record" && compositeType == null)) + ThrowHelper.ThrowInvalidCastException("GetData() not supported for type " + field.TypeDisplayName); - SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen == -1) + if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is DbNullSentinel) ThrowHelper.ThrowInvalidCastException_NoValue(field); - if (_isSequential) - throw new NotSupportedException("GetData() not supported in sequential mode."); + Debug.Assert(!PgReader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); + var reader = PgReader; + reader.Seek(0); - var reader = CachedFreeNestedDataReader; - if (reader != null) + var nestedReader = CachedFreeNestedDataReader; + if (nestedReader != null) { CachedFreeNestedDataReader = null; - reader.Init(UniqueRowId, compositeType); + nestedReader.Init(compositeType); } else { - reader = new NpgsqlNestedDataReader(this, null, UniqueRowId, 1, compositeType); + nestedReader = new NpgsqlNestedDataReader(this, null, 1, compositeType); } if (isArray) - reader.InitArray(); + nestedReader.InitArray(); else - reader.InitSingleRow(); - return reader; + nestedReader.InitSingleRow(); + return nestedReader; } #endregion @@ -1403,42 +1414,34 @@ public override int GetValues(object[] values) /// The actual number of bytes read. public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int bufferOffset, int length) { - if (dataOffset < 0 || dataOffset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); + ThrowIfNotInResult(); + var field = RowDescription[ordinal]; + + if (dataOffset is < 0 or > int.MaxValue) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between {0} and {(buffer.Length)}"); + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between {0} and {buffer.Length - bufferOffset}"); + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - var field = CheckRowAndGetField(ordinal); - var handler = field.Handler; - if (!(handler is ByteaHandler)) - throw new InvalidCastException("GetBytes() not supported for type " + field.Name); - - SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen == -1) + if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is var columnLength && columnLength is DbNullSentinel) ThrowHelper.ThrowInvalidCastException_NoValue(field); - if (buffer == null) - return ColumnLen; + if (buffer is null) + return columnLength; - var dataOffset2 = (int)dataOffset; - SeekInColumn(dataOffset2, false).GetAwaiter().GetResult(); + // Check whether any sequential seek is contractually sound (even though we might be able to satisfy rewinds we make sure we won't). 
+ var reader = PgReader; + if (_isSequential && reader.IsFieldPastOffset((int)dataOffset)) + ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - // Attempt to read beyond the end of the column - if (dataOffset2 + length > ColumnLen) - length = Math.Max(ColumnLen - dataOffset2, 0); - - var left = length; - while (left > 0) - { - var read = Buffer.Read(new Span(buffer, bufferOffset, left)); - bufferOffset += read; - left -= read; - } - - PosInColumn += length; + // Move to offset + Debug.Assert(!reader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); + reader.Seek((int)dataOffset); + // At offset, read into buffer. + length = Math.Min(length, reader.CurrentRemaining); + reader.ReadBytes(new Span(buffer, bufferOffset, length)); return length; } @@ -1447,7 +1450,8 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int /// /// The zero-based column ordinal. /// The returned object. - public override Stream GetStream(int ordinal) => GetStream(ordinal, false).Result; + public override Stream GetStream(int ordinal) + => GetFieldValueCore(ordinal); /// /// Retrieves data as a . @@ -1458,31 +1462,7 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int /// /// The returned object. 
public Task GetStreamAsync(int ordinal, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return GetStream(ordinal, true, cancellationToken).AsTask(); - } - - ValueTask GetStream(int ordinal, bool async, CancellationToken cancellationToken = default) => - GetStreamInternal(CheckRowAndGetField(ordinal), ordinal, async, cancellationToken); - - async ValueTask GetStreamInternal(FieldDescription field, int ordinal, bool async, CancellationToken cancellationToken = default) - { - if (_columnStream is { IsDisposed: false }) - throw new InvalidOperationException("A stream is already open for this reader"); - - using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - await SeekToColumn(ordinal, async, cancellationToken); - if (_isSequential) - CheckColumnStart(); - - if (ColumnLen == -1) - ThrowHelper.ThrowInvalidCastException_NoValue(field); - - PosInColumn += ColumnLen; - return _columnStream = (NpgsqlReadBuffer.ColumnStream)Buffer.GetStream(ColumnLen, !_isSequential); - } + => GetFieldValueAsync(ordinal, cancellationToken); #endregion @@ -1499,106 +1479,31 @@ async ValueTask GetStreamInternal(FieldDescription field, int ordinal, b /// The actual number of characters read. public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int bufferOffset, int length) { - if (dataOffset < 0 || dataOffset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); - if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between {0} and {(buffer.Length)}"); - if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between {0} and {buffer.Length - bufferOffset}"); - - var field = CheckRowAndGetField(ordinal); - var handler = field.Handler as TextHandler; - if (handler == null) - throw new InvalidCastException("The GetChars method is not supported for type " + field.Name); - - SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen == -1) - ThrowHelper.ThrowInvalidCastException_NoValue(field); - - if (PosInColumn == 0) - _charPos = 0; - - var decoder = Buffer.TextEncoding.GetDecoder(); - - if (buffer == null) - { - // Note: Getting the length of a text column means decoding the entire field, - // very inefficient and also consumes the column in sequential mode. But this seems to - // be SqlClient's behavior as well. - var (bytesSkipped, charsSkipped) = SkipChars(decoder, int.MaxValue, ColumnLen - PosInColumn); - Debug.Assert(bytesSkipped == ColumnLen - PosInColumn); - PosInColumn += bytesSkipped; - _charPos += charsSkipped; - return _charPos; - } - - if (PosInColumn == ColumnLen || dataOffset < _charPos) - { - // Either the column has already been read (e.g. GetString()) or a previous GetChars() - // has positioned us in the column *after* the requested read start offset. 
Seek back - // (this will throw for sequential) - SeekInColumn(0, false).GetAwaiter().GetResult(); - _charPos = 0; - } - - if (dataOffset > _charPos) - { - var charsToSkip = (int)dataOffset - _charPos; - var (bytesSkipped, charsSkipped) = SkipChars(decoder, charsToSkip, ColumnLen - PosInColumn); - decoder.Reset(); - PosInColumn += bytesSkipped; - _charPos += charsSkipped; - if (charsSkipped < charsToSkip) // data offset is beyond the column's end - return 0; - } + ThrowIfNotInResult(); - // We're now positioned at the start of the segment of characters we need to read. - if (length == 0) - return 0; + // Check whether we have a GetChars implementation for this column type. + var context = GetConversionContext(ordinal, typeof(GetChars)); - var (bytesRead, charsRead) = DecodeChars(decoder, buffer, bufferOffset, length, ColumnLen - PosInColumn); + if (dataOffset is < 0 or > int.MaxValue) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); + if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length); + if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - PosInColumn += bytesRead; - _charPos += charsRead; - return charsRead; - } + var reader = PgReader; + if (SeekToColumn(ordinal, context.Binding.DataFormat, resumableOp: true) is DbNullSentinel) + ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription[ordinal]); - (int BytesRead, int CharsRead) DecodeChars(Decoder decoder, char[] output, int outputOffset, int charCount, int byteCount) - { - var (bytesRead, charsRead) = (0, 0); + dataOffset = buffer is null ? 
0 : dataOffset; + if (_isSequential && reader.GetCharsRead > dataOffset) + ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - while (true) - { - Buffer.Ensure(1); // Make sure we have at least some data - - var maxBytes = Math.Min(byteCount - bytesRead, Buffer.ReadBytesLeft); - decoder.Convert(Buffer.Buffer, Buffer.ReadPosition, maxBytes, output, outputOffset, charCount - charsRead, false, - out var bytesUsed, out var charsUsed, out _); - Buffer.ReadPosition += bytesUsed; - bytesRead += bytesUsed; - charsRead += charsUsed; - if (charsRead == charCount || bytesRead == byteCount) - break; - outputOffset += charsUsed; - Buffer.Clear(); - } - - return (bytesRead, charsRead); - } - - internal (int BytesSkipped, int CharsSkipped) SkipChars(Decoder decoder, int charCount, int byteCount) - { - // TODO: Allocate on the stack with Span - if (_tempCharBuf == null) - _tempCharBuf = new char[1024]; - var (charsSkipped, bytesSkipped) = (0, 0); - while (charsSkipped < charCount && bytesSkipped < byteCount) - { - var (bytesRead, charsRead) = DecodeChars(decoder, _tempCharBuf, 0, Math.Min(charCount, _tempCharBuf.Length), byteCount); - bytesSkipped += bytesRead; - charsSkipped += charsRead; - } - return (bytesSkipped, charsSkipped); + reader.StartCharsRead(checked((int)dataOffset), + buffer is not null ? new ArraySegment(buffer, bufferOffset, length) : (ArraySegment?)null); + var result = context.TypeInfo.ReadFieldValue(reader, context.Binding); + reader.EndCharsRead(); + return result.Read; } /// @@ -1607,7 +1512,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int /// The zero-based column ordinal. /// The returned object. public override TextReader GetTextReader(int ordinal) - => GetTextReader(ordinal, false).Result; + => GetFieldValueCore(ordinal); /// /// Retrieves data as a . @@ -1618,25 +1523,7 @@ public override TextReader GetTextReader(int ordinal) /// /// The returned object. 
public Task GetTextReaderAsync(int ordinal, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return GetTextReader(ordinal, true, cancellationToken).AsTask(); - } - - async ValueTask GetTextReader(int ordinal, bool async, CancellationToken cancellationToken = default) - { - var field = CheckRowAndGetField(ordinal); - - if (field.Handler is ITextReaderHandler handler) - { - var stream = async - ? await GetStreamInternal(field, ordinal, true, cancellationToken) - : GetStreamInternal(field, ordinal, false, CancellationToken.None).Result; - return handler.GetTextReader(stream, Buffer); - } - - throw new InvalidCastException($"The GetTextReader method is not supported for type {field.Handler.PgDisplayName}"); - } + => GetFieldValueAsync(ordinal, cancellationToken); #endregion @@ -1653,237 +1540,62 @@ async ValueTask GetTextReader(int ordinal, bool async, CancellationT /// public override Task GetFieldValueAsync(int ordinal, CancellationToken cancellationToken) { - if (typeof(T) == typeof(Stream)) - return (Task)(object)GetStreamAsync(ordinal, cancellationToken); - - if (typeof(T) == typeof(TextReader)) - return (Task)(object)GetTextReaderAsync(ordinal, cancellationToken); - - // In non-sequential, we know that the column is already buffered - no I/O will take place - if (!_isSequential) - return Task.FromResult(GetFieldValue(ordinal)); - - using (NoSynchronizationContextScope.Enter()) - return GetFieldValueSequential(ordinal, true, cancellationToken).AsTask(); - } - - /// - /// Synchronously gets the value of the specified column as a type. - /// - /// Synchronously gets the value of the specified column as a type. - /// The column to be retrieved. - /// The column to be retrieved. 
- public override T GetFieldValue(int ordinal) - { - if (typeof(T) == typeof(Stream)) - return (T)(object)GetStream(ordinal); - - if (typeof(T) == typeof(TextReader)) - return (T)(object)GetTextReader(ordinal); + // As the row is buffered we know the column is too - no I/O will take place + if (!_isRowBuffered) + return Core(ordinal, cancellationToken); - if (_isSequential) - return GetFieldValueSequential(ordinal, false).GetAwaiter().GetResult(); - - // In non-sequential, we know that the column is already buffered - no I/O will take place - - var field = CheckRowAndGetField(ordinal); - SeekToColumnNonSequential(ordinal); - - if (ColumnLen == -1) - { - // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) - return default!; - - if (typeof(T) == typeof(object)) - return (T)(object)DBNull.Value; - - ThrowHelper.ThrowInvalidCastException_NoValue(field); - } - - var position = Buffer.ReadPosition; try { - return NullableHandler.Exists - ? NullableHandler.Read(field.Handler, Buffer, ColumnLen, field) - : typeof(T) == typeof(object) - ? 
(T)field.Handler.ReadAsObject(Buffer, ColumnLen, field) - : field.Handler.Read(Buffer, ColumnLen, field); + return Task.FromResult(GetFieldValueCore(ordinal)); } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - Buffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - throw; - } - finally + catch (Exception ex) { - // Important: position must still be updated - PosInColumn += ColumnLen; + return Task.FromException(ex); } - } - - async ValueTask GetFieldValueSequential(int column, bool async, CancellationToken cancellationToken = default) - { - using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var field = CheckRowAndGetField(column); - await SeekToColumnSequential(column, async, CancellationToken.None); - CheckColumnStart(); - - if (ColumnLen == -1) + async Task Core(int ordinal, CancellationToken cancellationToken) { - // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) - return default!; + ThrowIfNotInResult(); - if (typeof(T) == typeof(object)) - return (T)(object)DBNull.Value; + var context = GetConversionContext(ordinal, type: typeof(T) == typeof(object) ? null : typeof(T)); - ThrowHelper.ThrowInvalidCastException_NoValue(field); - } + using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var position = Buffer.ReadPosition; - try - { - return NullableHandler.Exists - ? ColumnLen <= Buffer.ReadBytesLeft - ? NullableHandler.Read(field.Handler, Buffer, ColumnLen, field) - : await NullableHandler.ReadAsync(field.Handler, Buffer, ColumnLen, async, field) - : typeof(T) == typeof(object) - ? ColumnLen <= Buffer.ReadBytesLeft - ? 
(T)field.Handler.ReadAsObject(Buffer, ColumnLen, field) - : (T)await field.Handler.ReadAsObject(Buffer, ColumnLen, async, field) - : ColumnLen <= Buffer.ReadBytesLeft - ? field.Handler.Read(Buffer, ColumnLen, field) - : await field.Handler.Read(Buffer, ColumnLen, async, field); - } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - await Buffer.Skip(remainingBytes, async); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; + var reader = PgReader; + return await SeekToColumnAsync(ordinal, context.Binding.DataFormat).ConfigureAwait(false) is DbNullSentinel + ? DbNullValueOrThrow(ordinal) + : await context.TypeInfo.ReadFieldValueAsync(reader, context.Binding, cancellationToken).ConfigureAwait(false); } } - #endregion - - #region GetValue - /// - /// Gets the value of the specified column as an instance of . + /// Synchronously gets the value of the specified column as a type. /// - /// The zero-based column ordinal. - /// The value of the specified column. - public override object GetValue(int ordinal) - { - var fieldDescription = CheckRowAndGetField(ordinal); - - if (_isSequential) { - SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); - CheckColumnStart(); - } else - SeekToColumnNonSequential(ordinal); + /// Synchronously gets the value of the specified column as a type. + /// The column to be retrieved. + /// The column to be retrieved. + public override T GetFieldValue(int ordinal) => GetFieldValueCore(ordinal); - if (ColumnLen == -1) - return DBNull.Value; + T GetFieldValueCore(int ordinal) + { + ThrowIfNotInResult(); + var context = GetConversionContext(ordinal, type: typeof(T) == typeof(object) ? null : typeof(T)); - object result; - var position = Buffer.ReadPosition; - try - { - result = _isSequential - ? 
fieldDescription.Handler.ReadAsObject(Buffer, ColumnLen, false, fieldDescription).GetAwaiter().GetResult() - : fieldDescription.Handler.ReadAsObject(Buffer, ColumnLen, fieldDescription); - } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - Buffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; - } + return SeekToColumn(ordinal, context.Binding.DataFormat) is DbNullSentinel + ? DbNullValueOrThrow(ordinal) + : context.TypeInfo.ReadFieldValue(PgReader, context.Binding); + } - // Used for Entity Framework <= 6 compability - var objectResultType = Command.ObjectResultTypes?[ordinal]; - if (objectResultType != null) - { - result = objectResultType == typeof(DateTimeOffset) - ? new DateTimeOffset((DateTime)result) - : Convert.ChangeType(result, objectResultType)!; - } + #endregion - return result; - } + #region GetValue /// /// Gets the value of the specified column as an instance of . /// /// The zero-based column ordinal. /// The value of the specified column. - public override object GetProviderSpecificValue(int ordinal) - { - var fieldDescription = CheckRowAndGetField(ordinal); - - if (_isSequential) - { - SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); - CheckColumnStart(); - } - else - SeekToColumnNonSequential(ordinal); - - if (ColumnLen == -1) - return DBNull.Value; - - var position = Buffer.ReadPosition; - try - { - return _isSequential - ? 
fieldDescription.Handler.ReadPsvAsObject(Buffer, ColumnLen, false, fieldDescription).GetAwaiter().GetResult() - : fieldDescription.Handler.ReadPsvAsObject(Buffer, ColumnLen, fieldDescription); - } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - Buffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; - } - } + public override object GetValue(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as an instance of . @@ -1903,14 +1615,9 @@ public override object GetProviderSpecificValue(int ordinal) /// true if the specified column is equivalent to ; otherwise false. public override bool IsDBNull(int ordinal) { - CheckRowAndGetField(ordinal); - - if (_isSequential) - SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); - else - SeekToColumnNonSequential(ordinal); - - return ColumnLen == -1; + ThrowIfNotInResult(); + var field = RowDescription[ordinal]; + return SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is DbNullSentinel; } /// @@ -1924,21 +1631,17 @@ public override bool IsDBNull(int ordinal) /// true if the specified column value is equivalent to otherwise false. public override Task IsDBNullAsync(int ordinal, CancellationToken cancellationToken) { - CheckRowAndGetField(ordinal); - - if (!_isSequential) - return IsDBNull(ordinal) ? PGUtil.TrueTask : PGUtil.FalseTask; + if (_isRowBuffered) + return IsDBNull(ordinal) ? 
TrueTask : FalseTask; - using (NoSynchronizationContextScope.Enter()) - return IsDBNullAsyncInternal(ordinal, cancellationToken); + return Core(ordinal, cancellationToken); - // ReSharper disable once InconsistentNaming - async Task IsDBNullAsyncInternal(int ordinal, CancellationToken cancellationToken) + async Task Core(int ordinal, CancellationToken cancellationToken) { + ThrowIfNotInResult(); + var field = RowDescription[ordinal]; using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - await SeekToColumn(ordinal, true, cancellationToken); - return ColumnLen == -1; + return await SeekToColumnAsync(ordinal, field.DataFormat, resumableOp: true).ConfigureAwait(false) is DbNullSentinel; } } @@ -1953,11 +1656,11 @@ async Task IsDBNullAsyncInternal(int ordinal, CancellationToken cancellati /// The zero-based column ordinal. public override int GetOrdinal(string name) { + ThrowIfClosedOrDisposed(); if (string.IsNullOrEmpty(name)) - throw new ArgumentException("name cannot be empty", nameof(name)); - CheckClosedOrDisposed(); + ThrowHelper.ThrowArgumentException($"{nameof(name)} cannot be empty", nameof(name)); if (RowDescription is null) - throw new InvalidOperationException("No resultset is currently being traversed"); + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); return RowDescription.GetFieldIndex(name); } @@ -1991,42 +1694,11 @@ public override int GetOrdinal(string name) /// /// The zero-based column ordinal. /// The data type of the specified column. + [UnconditionalSuppressMessage("ILLink", "IL2093", + Justification = "Members are only dynamically accessed by Npgsql via GetFieldType by GetSchema, and only in certain cases. " + + "Holding PublicFields and PublicProperties metadata on all our mapped types just for that case is the wrong tradeoff.")] public override Type GetFieldType(int ordinal) - => Command.ObjectResultTypes?[ordinal] - ?? 
GetField(ordinal).FieldType; - - /// - /// Returns the provider-specific field type of the specified column. - /// - /// The zero-based column ordinal. - /// The Type object that describes the data type of the specified column. - public override Type GetProviderSpecificFieldType(int ordinal) - { - var fieldDescription = GetField(ordinal); - return fieldDescription.Handler.GetProviderSpecificFieldType(fieldDescription); - } - - /// - /// Gets all provider-specific attribute columns in the collection for the current row. - /// - /// An array of Object into which to copy the attribute columns. - /// The number of instances of in the array. - public override int GetProviderSpecificValues(object[] values) - { - if (values == null) - throw new ArgumentNullException(nameof(values)); - if (State != ReaderState.InResult) - { - throw State == ReaderState.Disposed - ? new ObjectDisposedException(nameof(NpgsqlDataReader)) - : new InvalidOperationException("No row is available"); - } - - var count = Math.Min(FieldCount, values.Length); - for (var i = 0; i < count; i++) - values[i] = GetProviderSpecificValue(i); - return count; - } + => GetField(ordinal).FieldType; /// /// Returns an that can be used to iterate through the rows in the data reader. @@ -2040,30 +1712,31 @@ public override IEnumerator GetEnumerator() /// /// public ReadOnlyCollection GetColumnSchema() - => GetColumnSchema(async: false).GetAwaiter().GetResult(); + => GetColumnSchema(async: false).GetAwaiter().GetResult(); ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() - => new(GetColumnSchema().Select(c => (DbColumn)c).ToList()); + { + var columns = GetColumnSchema(); + var result = new DbColumn[columns.Count]; + var i = 0; + foreach (var column in columns) + result[i++] = column; + + return new ReadOnlyCollection(result); + } /// /// Asynchronously returns schema information for the columns in the current resultset. 
/// /// -#if NET5_0_OR_GREATER - public new Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) -#else - public Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return GetColumnSchema(async: true, cancellationToken); - } + public override Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) + => GetColumnSchema(async: true, cancellationToken); - Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) - => RowDescription == null || RowDescription.Count == 0 - ? Task.FromResult(new List().AsReadOnly()) + Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) where T : DbColumn + => RowDescription == null || ColumnCount == 0 + ? Task.FromResult(new List().AsReadOnly()) : new DbColumnSchemaGenerator(_connection!, RowDescription, _behavior.HasFlag(CommandBehavior.KeyInfo)) - .GetColumnSchema(async, cancellationToken); + .GetColumnSchema(async, cancellationToken); #endregion @@ -2082,18 +1755,10 @@ Task> GetColumnSchema(bool async, Cancellatio /// [UnconditionalSuppressMessage( "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] -#if NET5_0_OR_GREATER public override Task GetSchemaTableAsync(CancellationToken cancellationToken = default) -#else - public Task GetSchemaTableAsync(CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return GetSchemaTable(async: true, cancellationToken); - } + => GetSchemaTable(async: true, cancellationToken); - [UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "typeof(Type).TypeInitializer is not used.")] async Task GetSchemaTable(bool async, CancellationToken 
cancellationToken = default) { if (FieldCount == 0) // No resultset @@ -2129,13 +1794,13 @@ Task> GetColumnSchema(bool async, Cancellatio table.Columns.Add("ProviderSpecificDataType", typeof(Type)); table.Columns.Add("DataTypeName", typeof(string)); - foreach (var column in await GetColumnSchema(async, cancellationToken)) + foreach (var column in await GetColumnSchema(async, cancellationToken).ConfigureAwait(false)) { var row = table.NewRow(); row["ColumnName"] = column.ColumnName; row["ColumnOrdinal"] = column.ColumnOrdinal ?? -1; - row["ColumnSize"] = column.ColumnSize ?? -1; + row["ColumnSize"] = column.ColumnSize ?? DbNullSentinel; row["NumericPrecision"] = column.NumericPrecision ?? 0; row["NumericScale"] = column.NumericScale ?? 0; row["IsUnique"] = column.IsUnique == true; @@ -2155,6 +1820,7 @@ Task> GetColumnSchema(bool async, Cancellatio row["IsRowVersion"] = false; row["IsHidden"] = column.IsHidden == true; row["IsLong"] = column.IsLong == true; + row["IsReadOnly"] = column.IsReadOnly == true; row["DataTypeName"] = column.DataTypeName; table.Rows.Add(row); @@ -2167,111 +1833,129 @@ Task> GetColumnSchema(bool async, Cancellatio #region Seeking - Task SeekToColumn(int column, bool async, CancellationToken cancellationToken = default) + [MethodImpl(MethodImplOptions.NoInlining)] + int SeekToColumn(int ordinal, DataFormat fieldFormat, bool resumableOp = false) { - if (_isSequential) - return SeekToColumnSequential(column, async, cancellationToken); - SeekToColumnNonSequential(column); - return Task.CompletedTask; + Debug.Assert(_isRowBuffered || _isSequential); + var reader = PgReader; + var column = _column; + + // Column rereading rules for sequential mode: + // * We never allow rereading if the column didn't get initialized as resumable the previous time + // * If it did get initialized as resumable we only allow rereading when either of the following is true: + // - The op is a resumable one again + // - The op isn't resumable but the field is still 
entirely unconsumed + // Note: this relies on resumable reads (e.g. GetChars) always advancing ReadPosition, + // even when data could be serviced from the buffer, so that FieldAtStart correctly + // reflects whether the column has been read. + if (_isSequential && (column > ordinal || (column == ordinal && (!reader.Resumable || (!resumableOp && !reader.FieldAtStart))))) + ThrowInvalidSequentialSeek(column, ordinal); + + if (column == ordinal) + return reader.Restart(resumableOp); + + reader.Commit(); + var columnLength = BufferSeekToColumn(column, ordinal, !_isRowBuffered); + reader.Init(fieldFormat, columnLength, resumableOp); + return columnLength; + + static void ThrowInvalidSequentialSeek(int column, int ordinal) + => ThrowHelper.ThrowInvalidOperationException( + $"Invalid attempt to read from column ordinal '{ordinal}'. With CommandBehavior.SequentialAccess, " + + $"you may only read from column ordinal '{column}' or greater."); } - void SeekToColumnNonSequential(int column) + ValueTask SeekToColumnAsync(int ordinal, DataFormat fieldFormat, bool resumableOp = false) { - // Shut down any streaming going on on the column - if (_columnStream != null) - { - _columnStream.Dispose(); - _columnStream = null; - } + // When the row is buffered or we're rereading previous data no IO will be done. 
+ if (_isRowBuffered || _column >= ordinal) + return new(SeekToColumn(ordinal, fieldFormat, resumableOp)); + + return Core(ordinal, fieldFormat, resumableOp); - for (var lastColumnRead = _columns.Count; column >= lastColumnRead; lastColumnRead++) + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask Core(int ordinal, DataFormat fieldFormat, bool resumableOp) { - int lastColumnLen; - (Buffer.ReadPosition, lastColumnLen) = _columns[lastColumnRead-1]; - if (lastColumnLen != -1) - Buffer.ReadPosition += lastColumnLen; - var len = Buffer.ReadInt32(); - _columns.Add((Buffer.ReadPosition, len)); - } + Debug.Assert(!_isRowBuffered && _column < ordinal); - (Buffer.ReadPosition, ColumnLen) = _columns[column]; - _column = column; - PosInColumn = 0; + var reader = PgReader; + await reader.CommitAsync().ConfigureAwait(false); + var columnLength = await BufferSeekToColumnAsync(_column, ordinal, !_isRowBuffered).ConfigureAwait(false); + reader.Init(fieldFormat, columnLength, resumableOp); + return columnLength; + } } - /// - /// Seeks to the given column. The 4-byte length is read and stored in . - /// - async Task SeekToColumnSequential(int column, bool async, CancellationToken cancellationToken = default) + int BufferSeekToColumn(int column, int ordinal, bool allowIO) { - if (column < 0 || column >= _numColumns) - throw new IndexOutOfRangeException("Column index out of range"); + Debug.Assert(column < ordinal || !allowIO); + + if (column >= ordinal) + { + _column = ordinal; + return SeekBackwards(ordinal); + } - if (column < _column) - throw new InvalidOperationException($"Invalid attempt to read from column ordinal '{column}'. With CommandBehavior.SequentialAccess, you may only read from column ordinal '{_column}' or greater."); + // We know we need at least one iteration, a do while also helps with optimal codegen. 
+ var buffer = Buffer; + var columnLength = 0; + do + { + if (columnLength > 0) + buffer.Skip(columnLength, allowIO); - if (column == _column) - return; + if (allowIO) + buffer.Ensure(sizeof(int)); + columnLength = buffer.ReadInt32(); + Debug.Assert(columnLength is DbNullSentinel or >= 0); + } while (++_column < ordinal); - // Need to seek forward + return columnLength; - // Shut down any streaming going on on the column - if (_columnStream != null) + // On the first call to SeekBackwards we'll fill up the columns list as we may need seek positions more than once. + [MethodImpl(MethodImplOptions.NoInlining)] + int SeekBackwards(int ordinal) { - _columnStream.Dispose(); - _columnStream = null; - // Disposing the stream leaves us at the end of the column - PosInColumn = ColumnLen; - } + var buffer = Buffer; + var columns = _columns; - // Skip to end of column if needed - // TODO: Simplify by better initializing _columnLen/_posInColumn - var remainingInColumn = ColumnLen == -1 ? 0 : ColumnLen - PosInColumn; - if (remainingInColumn > 0) - await Buffer.Skip(remainingInColumn, async); + (buffer.ReadPosition, var columnLength) = columns.Count is 0 + ? 
(_columnsStartPos, 0) + : columns[Math.Min(columns.Count - 1, ordinal)]; - // Skip over unwanted fields - for (; _column < column - 1; _column++) - { - await Buffer.Ensure(4, async); - var len = Buffer.ReadInt32(); - if (len != -1) - await Buffer.Skip(len, async); - } + while (columns.Count <= ordinal) + { + if (columnLength > 0) + buffer.Skip(columnLength); + columnLength = buffer.ReadInt32(); + columns.Add((buffer.ReadPosition, columnLength)); + } - await Buffer.Ensure(4, async); - ColumnLen = Buffer.ReadInt32(); - PosInColumn = 0; - _column = column; + return columnLength; + } } - Task SeekInColumn(int posInColumn, bool async, CancellationToken cancellationToken = default) + ValueTask BufferSeekToColumnAsync(int column, int ordinal, bool allowIO) { - if (_isSequential) - return SeekInColumnSequential(posInColumn, async); - - if (posInColumn > ColumnLen) - posInColumn = ColumnLen; + return !allowIO || column >= ordinal ? new(BufferSeekToColumn(column, ordinal, allowIO)) : Core(ordinal); - Buffer.ReadPosition = _columns[_column].Offset + posInColumn; - PosInColumn = posInColumn; - return Task.CompletedTask; - - async Task SeekInColumnSequential(int posInColumn, bool async) + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask Core(int ordinal) { - Debug.Assert(_column > -1); - - if (posInColumn < PosInColumn) - throw new InvalidOperationException("Attempt to read a position in the column which has already been read"); + // We know we need at least one iteration, a do while also helps with optimal codegen. 
+ var buffer = Buffer; + var columnLength = 0; + do + { + if (columnLength > 0) + await buffer.Skip(async: true, columnLength).ConfigureAwait(false); - if (posInColumn > ColumnLen) - posInColumn = ColumnLen; + await buffer.EnsureAsync(sizeof(int)).ConfigureAwait(false); + columnLength = buffer.ReadInt32(); + Debug.Assert(columnLength is DbNullSentinel or >= 0); + } while (++_column < ordinal); - if (posInColumn > PosInColumn) - { - await Buffer.Skip(posInColumn - PosInColumn, async); - PosInColumn = posInColumn; - } + return columnLength; } } @@ -2281,56 +1965,42 @@ async Task SeekInColumnSequential(int posInColumn, bool async) Task ConsumeRow(bool async) { - Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); - - UniqueRowId++; + Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); - if (!_canConsumeRowNonSequentially) + if (!_isRowBuffered) return ConsumeRowSequential(async); // We get here, if we're in a non-sequential mode (or the row is already in the buffer) - ConsumeRowNonSequential(); + ConsumeBufferedRow(); return Task.CompletedTask; async Task ConsumeRowSequential(bool async) { - if (_columnStream != null) - { - _columnStream.Dispose(); - _columnStream = null; - // Disposing the stream leaves us at the end of the column - PosInColumn = ColumnLen; - } - - // TODO: Potential for code-sharing with ReadColumn above, which also skips - // Skip to end of column if needed - var remainingInColumn = ColumnLen == -1 ? 0 : ColumnLen - PosInColumn; - if (remainingInColumn > 0) - await Buffer.Skip(remainingInColumn, async); + if (async) + await PgReader.CommitAsync().ConfigureAwait(false); + else + PgReader.Commit(); // Skip over the remaining columns in the row - for (; _column < _numColumns - 1; _column++) + var buffer = Buffer; + // Written as a while to be able to increment _column directly after reading into it. 
+ while (_column < ColumnCount - 1) { - await Buffer.Ensure(4, async); - var len = Buffer.ReadInt32(); - if (len != -1) - await Buffer.Skip(len, async); + await buffer.Ensure(4, async).ConfigureAwait(false); + var columnLength = buffer.ReadInt32(); + _column++; + Debug.Assert(columnLength >= DbNullSentinel); + if (columnLength > 0) + await buffer.Skip(async, columnLength).ConfigureAwait(false); } } } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void ConsumeRowNonSequential() + void ConsumeBufferedRow() { - Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); - - if (_columnStream != null) - { - _columnStream.Dispose(); - _columnStream = null; - // Disposing the stream leaves us at the end of the column - PosInColumn = ColumnLen; - } + Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); + PgReader.Commit(); Buffer.ReadPosition = _dataMsgEnd; } @@ -2338,77 +2008,91 @@ void ConsumeRowNonSequential() #region Checks - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void CheckResultSet() + [MethodImpl(MethodImplOptions.NoInlining)] + T DbNullValueOrThrow(int ordinal) { - switch (State) - { - case ReaderState.BeforeResult: - case ReaderState.InResult: - break; - case ReaderState.Closed: - throw new InvalidOperationException("The reader is closed"); - case ReaderState.Disposed: - throw new ObjectDisposedException(nameof(NpgsqlDataReader)); - default: - throw new InvalidOperationException("No resultset is currently being traversed"); - } + // When T is a Nullable (and only in that case), we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; + + if (typeof(T) == typeof(object)) + return (T)(object)DBNull.Value; + + ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription![ordinal]); + return default; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - FieldDescription CheckRowAndGetField(int column) + ReadConversionContext GetConversionContext(int ordinal, Type? 
type) { - switch (State) + ReadConversionContext context; + if (type is not null) { - case ReaderState.InResult: - break; - case ReaderState.Closed: - throw new InvalidOperationException("The reader is closed"); - case ReaderState.Disposed: - throw new ObjectDisposedException(nameof(NpgsqlDataReader)); - default: - throw new InvalidOperationException("No row is available"); - } + // Do the same check as the RowDescription indexer before we access the cache. + if ((uint)ordinal >= (uint)ColumnCount) + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", ColumnCount); - if (column < 0 || column >= RowDescription!.Count) - throw new IndexOutOfRangeException($"Column must be between {0} and {RowDescription!.Count - 1}"); + ref var contextRef = ref ConversionContextCache![ordinal]; - return RowDescription[column]; + Debug.Assert(contextRef.IsDefault || ReferenceEquals(Connector.SerializerOptions, contextRef.TypeInfo.Options), "Cache is bleeding over"); + + if (contextRef.TypeInfo is not { } typeInfo || !typeInfo.CanReadTo(type)) + RowDescription!.GetConversionContext(ordinal, type, ref contextRef); + + context = contextRef; + } + else + { + context = RowDescription![ordinal].ObjectConversionContext; + } + + return context; } /// /// Checks that we have a RowDescription, but not necessary an actual resultset - /// (for operations which work in SchemaOnly mode. + /// (for operations which work in SchemaOnly mode). 
/// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - FieldDescription GetField(int column) + FieldDescription GetField(int ordinal) { - if (RowDescription == null) - throw new InvalidOperationException("No resultset is currently being traversed"); + ThrowIfClosedOrDisposed(); + if (RowDescription is { } columns) + return columns[ordinal]; - if (column < 0 || column >= RowDescription.Count) - throw new IndexOutOfRangeException($"Column must be between {0} and {RowDescription.Count - 1}"); + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); + return default!; + } - return RowDescription[column]; + void ThrowIfClosedOrDisposed() + { + if (State is (ReaderState.Closed or ReaderState.Disposed) and var state) + ThrowInvalidState(state); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void CheckColumnStart() + [MemberNotNull(nameof(RowDescription))] + void ThrowIfNotInResult() { - Debug.Assert(_isSequential); - if (PosInColumn != 0) - throw new InvalidOperationException("Attempt to read a position in the column which has already been read"); + if (State is not ReaderState.InResult and var state) + ThrowInvalidState(state); + + Debug.Assert(RowDescription is not null); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void CheckClosedOrDisposed() + [MethodImpl(MethodImplOptions.NoInlining)] + static void ThrowInvalidState(ReaderState state) { - switch (State) + switch (state) { case ReaderState.Closed: - throw new InvalidOperationException("The reader is closed"); + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + break; case ReaderState.Disposed: - throw new ObjectDisposedException(nameof(NpgsqlDataReader)); + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + break; + default: + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); + break; } } @@ -2423,7 +2107,7 @@ void CheckClosedOrDisposed() internal void UnbindIfNecessary() { // We're 
closing the connection, but reader is not yet disposed - // We have to unbind the reader from the connector, otherwise there could be a concurency issues + // We have to unbind the reader from the connector, otherwise there could be a concurrency issues // See #3126 and #3290 if (State != ReaderState.Disposed) { diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 170c103a6b..a32a9480bc 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -4,17 +4,13 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Net.Security; -using System.Runtime.CompilerServices; -using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.ResolverFactories; using Npgsql.Properties; -using Npgsql.TypeMapping; using Npgsql.Util; namespace Npgsql; @@ -34,32 +30,44 @@ public abstract class NpgsqlDataSource : DbDataSource internal NpgsqlDataSourceConfiguration Configuration { get; } internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } - readonly List _resolverFactories; - readonly Dictionary _userTypeMappings; - readonly INpgsqlNameTranslator _defaultNameTranslator; + readonly PgTypeInfoResolverChain _resolverChain; + readonly IEnumerable _dbTypeResolverFactories; - internal TypeMapper TypeMapper { get; private set; } = null!; // Initialized at bootstrapping + internal ReloadableState CurrentReloadableState = null!; // Initialized during bootstrapping. - /// - /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). 
- /// - internal NpgsqlDatabaseInfo DatabaseInfo { get; set; } = null!; // Initialized at bootstrapping + // Initialized at bootstrapping + internal sealed class ReloadableState(NpgsqlDatabaseInfo databaseInfo, PgSerializerOptions serializerOptions, IDbTypeResolver? dbTypeResolver) + { + /// + /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). + /// + public NpgsqlDatabaseInfo DatabaseInfo { get; } = databaseInfo; + + public PgSerializerOptions SerializerOptions { get; } = serializerOptions; + + public IDbTypeResolver? DbTypeResolver { get; } = dbTypeResolver; + } + + internal TransportSecurityHandler TransportSecurityHandler { get; } - internal RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } - internal Action? ClientCertificatesCallback { get; } + internal Action? SslClientAuthenticationOptionsCallback { get; } + readonly Func? _passwordProvider; + readonly Func>? _passwordProviderAsync; readonly Func>? _periodicPasswordProvider; readonly TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; + internal IntegratedSecurityHandler IntegratedSecurityHandler { get; } + internal Action? ConnectionInitializer { get; } internal Func? ConnectionInitializerAsync { get; } - readonly Timer? _passwordProviderTimer; + readonly Timer? _periodicPasswordProviderTimer; readonly CancellationTokenSource? _timerPasswordProviderCancellationTokenSource; readonly Task _passwordRefreshTask = null!; string? 
_password; - bool _isBootstrapped; + internal bool IsBootstrapped { get; private set; } volatile DatabaseStateInfo _databaseStateInfo = new(); @@ -68,6 +76,9 @@ public abstract class NpgsqlDataSource : DbDataSource private protected readonly Dictionary> _pendingEnlistedConnectors = new(); + internal MetricsReporter MetricsReporter { get; } + internal string Name { get; } + internal abstract (int Total, int Idle, int Busy) Statistics { get; } volatile int _isDisposed; @@ -79,31 +90,51 @@ private protected readonly Dictionary> _pendi /// readonly SemaphoreSlim _setupMappingsSemaphore = new(1); - internal NpgsqlDataSource( - NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig) - { - Settings = settings; - ConnectionString = settings.PersistSecurityInfo - ? settings.ToString() - : settings.ToStringWithoutPassword(); + readonly INpgsqlNameTranslator _defaultNameTranslator; + readonly IDisposable? _eventSourceEvents; + internal NpgsqlDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig, bool reportMetrics) + { Configuration = dataSourceConfig; - (LoggingConfiguration, - UserCertificateValidationCallback, - ClientCertificatesCallback, + (var name, + LoggingConfiguration, + _, + _, + TransportSecurityHandler, + IntegratedSecurityHandler, + SslClientAuthenticationOptionsCallback, + _passwordProvider, + _passwordProviderAsync, _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, - _resolverFactories, - _userTypeMappings, + _resolverChain, + _dbTypeResolverFactories, _defaultNameTranslator, ConnectionInitializer, - ConnectionInitializerAsync) + ConnectionInitializerAsync, + _) = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; + Debug.Assert(_passwordProvider is null || _passwordProviderAsync is not null); + + Settings = settings; + + if (settings.PersistSecurityInfo) + { + ConnectionString = settings.ToString(); 
+ + // The data source name is reported in tracing/metrics, so avoid leaking the password through there. + Name = name ?? settings.ApplicationName ?? settings.ToStringWithoutPassword(); + } + else + { + ConnectionString = settings.ToStringWithoutPassword(); + Name = name ?? settings.ApplicationName ?? ConnectionString; + } + _password = settings.Password; if (_periodicPasswordSuccessRefreshInterval != default) @@ -112,23 +143,35 @@ internal NpgsqlDataSource( _timerPasswordProviderCancellationTokenSource = new(); - // Create the timer, but don't start it; the manual run below will will schedule the first refresh. - _passwordProviderTimer = new Timer(state => _ = RefreshPassword(), null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); + // Create the timer, but don't start it; the manual run below will schedule the first refresh. + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + _periodicPasswordProviderTimer = new Timer(state => _ = RefreshPassword(), null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); // Trigger the first refresh attempt right now, outside the timer; this allows us to capture the Task so it can be observed // in GetPasswordAsync. _passwordRefreshTask = Task.Run(RefreshPassword); } + + // TODO this needs a rework, but for now we just avoid tracking multi-host data sources directly. + if (reportMetrics) + { + MetricsReporter = new MetricsReporter(this); + if (!NpgsqlEventSource.Log.TryTrackDataSource(Name, this, out _eventSourceEvents)) + _connectionLogger.LogDebug("NpgsqlEventSource could not start tracking a DataSource, " + + "this can happen if more than one data source uses the same connection string."); + } + else + { + // This is not accessed anywhere currently for multi-host data sources. + // Connectors which handle the metrics always access their nonpooling/pooling data source instead. 
+ MetricsReporter = null!; + } } - /// - /// Returns a new, unopened connection from this data source. - /// + /// public new NpgsqlConnection CreateConnection() => NpgsqlConnection.FromDataSource(this); - /// - /// Returns a new, opened connection from this data source. - /// + /// public new NpgsqlConnection OpenConnection() { var connection = CreateConnection(); @@ -145,12 +188,11 @@ internal NpgsqlDataSource( } } - /// - /// Returns a new, opened connection from this data source. - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// + /// + protected override DbConnection OpenDbConnection() + => OpenConnection(); + + /// public new async ValueTask OpenConnectionAsync(CancellationToken cancellationToken = default) { var connection = CreateConnection(); @@ -167,13 +209,17 @@ internal NpgsqlDataSource( } } + /// + protected override async ValueTask OpenDbConnectionAsync(CancellationToken cancellationToken = default) + => await OpenConnectionAsync(cancellationToken).ConfigureAwait(false); + /// protected override DbConnection CreateDbConnection() => CreateConnection(); /// protected override DbCommand CreateDbCommand(string? commandText = null) - => CreateCommand(); + => CreateCommand(commandText); /// protected override DbBatch CreateDbBatch() @@ -192,6 +238,12 @@ protected override DbBatch CreateDbBatch() public new NpgsqlBatch CreateBatch() => new NpgsqlDataSourceBatch(CreateConnection()); + /// + /// If the data source pools connections, clears any idle connections and flags any busy connections to be closed as soon as they're + /// returned to the pool. + /// + public abstract void Clear(); + /// /// Creates a new for the given . /// @@ -204,6 +256,29 @@ public static NpgsqlDataSource Create(string connectionString) public static NpgsqlDataSource Create(NpgsqlConnectionStringBuilder connectionStringBuilder) => Create(connectionStringBuilder.ToString()); + /// + /// Flushes the type cache for this data source. 
+ /// Type changes will appear for connections only after they are re-opened from the pool. + /// + public void ReloadTypes() + { + using var connection = OpenConnection(); + connection.ReloadTypes(); + } + + /// + /// Flushes the type cache for this data source. + /// Type changes will appear for connections only after they are re-opened from the pool. + /// + public async Task ReloadTypesAsync(CancellationToken cancellationToken = default) + { + var connection = await OpenConnectionAsync(cancellationToken).ConfigureAwait(false); + await using (connection.ConfigureAwait(false)) + { + await connection.ReloadTypesAsync(cancellationToken).ConfigureAwait(false); + } + } + internal async Task Bootstrap( NpgsqlConnector connector, NpgsqlTimeout timeout, @@ -211,11 +286,11 @@ internal async Task Bootstrap( bool async, CancellationToken cancellationToken) { - if (_isBootstrapped && !forceReload) + if (IsBootstrapped && !forceReload) return; var hasSemaphore = async - ? await _setupMappingsSemaphore.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken) + ? await _setupMappingsSemaphore.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken).ConfigureAwait(false) : _setupMappingsSemaphore.Wait(timeout.CheckAndGetTimeLeft(), cancellationToken); if (!hasSemaphore) @@ -223,30 +298,60 @@ internal async Task Bootstrap( try { - if (_isBootstrapped && !forceReload) + if (IsBootstrapped && !forceReload) return; // The type loading below will need to send queries to the database, and that depends on a type mapper being set up (even if its - // empty). So we set up here, and then later inject the DatabaseInfo. - var typeMapper = new TypeMapper(connector, _defaultNameTranslator); - connector.TypeMapper = typeMapper; + // empty). So we set up a minimal version here, and then later inject the actual DatabaseInfo. 
+ connector.ReloadableState = new( + databaseInfo: PostgresMinimalDatabaseInfo.DefaultTypeCatalog, + serializerOptions: new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) + { + TextEncoding = connector.TextEncoding, + TypeInfoResolver = AdoTypeInfoResolverFactory.Instance.CreateResolver(), + }, + dbTypeResolver: null); NpgsqlDatabaseInfo databaseInfo; using (connector.StartUserAction(ConnectorState.Executing, cancellationToken)) - databaseInfo = await NpgsqlDatabaseInfo.Load(connector, timeout, async); + databaseInfo = await NpgsqlDatabaseInfo.Load(connector, timeout, async).ConfigureAwait(false); - DatabaseInfo = databaseInfo; - connector.DatabaseInfo = databaseInfo; - typeMapper.Initialize(databaseInfo, _resolverFactories, _userTypeMappings); - TypeMapper = typeMapper; - - _isBootstrapped = true; + var serializerOptions = new PgSerializerOptions(databaseInfo, _resolverChain, CreateTimeZoneProvider(connector.Timezone)) + { + ArrayNullabilityMode = Settings.ArrayNullabilityMode, + EnableDateTimeInfinityConversions = !Statics.DisableDateTimeInfinityConversions, + TextEncoding = connector.TextEncoding, + DefaultNameTranslator = _defaultNameTranslator + }; + + var resolvers = new List(); + foreach (var dbTypeResolverFactory in _dbTypeResolverFactories) + resolvers.Add(dbTypeResolverFactory.CreateDbTypeResolver(databaseInfo)); + + connector.ReloadableState = CurrentReloadableState = new ReloadableState( + databaseInfo: databaseInfo, + serializerOptions: serializerOptions, + dbTypeResolver: new ChainDbTypeResolver(resolvers)); + + IsBootstrapped = true; } finally { _setupMappingsSemaphore.Release(); } + + // Func in a static function to make sure we don't capture state that might not stay around, like a connector. 
+ static Func CreateTimeZoneProvider(string postgresTimeZone) + => () => + { + if (string.Equals(postgresTimeZone, "localtime", StringComparison.OrdinalIgnoreCase)) + throw new TimeZoneNotFoundException( + "The special PostgreSQL timezone 'localtime' is not supported when reading values of type 'timestamp with time zone'. " + + "Please specify a real timezone in 'postgresql.conf' on the server, or set the 'PGTZ' environment variable on the client."); + + return postgresTimeZone; + }; } #region Password management @@ -259,43 +364,63 @@ public string Password { set { - if (_periodicPasswordProvider is not null) + if (_passwordProvider is not null || _periodicPasswordProvider is not null) throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); _password = value; } } - internal async ValueTask GetPassword(bool async, CancellationToken cancellationToken = default) + internal ValueTask GetPassword(bool async, CancellationToken cancellationToken = default) { + if (_passwordProvider is not null) + return GetPassword(async, cancellationToken); + // A periodic password provider is configured, but the first refresh hasn't completed yet (race condition). - // Wait until it completes. if (_password is null && _periodicPasswordProvider is not null) + return GetInitialPeriodicPassword(async); + + return new(_password); + + async ValueTask GetInitialPeriodicPassword(bool async) { if (async) - await _passwordRefreshTask; + await _passwordRefreshTask.ConfigureAwait(false); else _passwordRefreshTask.GetAwaiter().GetResult(); - Debug.Assert(_password is not null); + + return _password; } - return _password; + async ValueTask GetPassword(bool async, CancellationToken cancellationToken) + { + try + { + return async ? 
await _passwordProviderAsync!(Settings, cancellationToken).ConfigureAwait(false) : _passwordProvider(Settings); + } + catch (Exception e) + { + _connectionLogger.LogError(e, "Password provider threw an exception"); + + throw new NpgsqlException("An exception was thrown from the password provider", e); + } + } } async Task RefreshPassword() { try { - _password = await _periodicPasswordProvider!(Settings, _timerPasswordProviderCancellationTokenSource!.Token); + _password = await _periodicPasswordProvider!(Settings, _timerPasswordProviderCancellationTokenSource!.Token).ConfigureAwait(false); - _passwordProviderTimer!.Change(_periodicPasswordSuccessRefreshInterval, Timeout.InfiniteTimeSpan); + _periodicPasswordProviderTimer!.Change(_periodicPasswordSuccessRefreshInterval, Timeout.InfiniteTimeSpan); } catch (Exception e) { _connectionLogger.LogError(e, "Periodic password provider threw an exception"); - _passwordProviderTimer!.Change(_periodicPasswordFailureRefreshInterval, Timeout.InfiniteTimeSpan); + _periodicPasswordProviderTimer!.Change(_periodicPasswordFailureRefreshInterval, Timeout.InfiniteTimeSpan); throw new NpgsqlException("An exception was thrown from the periodic password provider", e); } @@ -313,8 +438,6 @@ internal abstract ValueTask Get( internal abstract void Return(NpgsqlConnector connector); - internal abstract void Clear(); - internal abstract bool OwnsConnectors { get; } #region Database state management @@ -339,9 +462,9 @@ internal DatabaseState UpdateDatabaseState( Debug.Assert(this is not NpgsqlMultiHostDataSource); var databaseStateInfo = _databaseStateInfo; - + if (!ignoreTimeStamp && timeStamp <= databaseStateInfo.TimeStamp) - return _databaseStateInfo.State; + return databaseStateInfo.State; _databaseStateInfo = new(newState, new NpgsqlTimeout(stateExpiration), timeStamp); @@ -385,7 +508,7 @@ internal virtual bool TryRentEnlistedPending(Transaction transaction, NpgsqlConn connector = null; return false; } - connector = list[list.Count - 1]; + 
connector = list[^1]; list.RemoveAt(list.Count - 1); if (list.Count == 0) _pendingEnlistedConnectors.Remove(transaction); @@ -398,53 +521,83 @@ internal virtual bool TryRentEnlistedPending(Transaction transaction, NpgsqlConn #region Dispose /// - protected override void Dispose(bool disposing) + protected sealed override void Dispose(bool disposing) { if (disposing && Interlocked.CompareExchange(ref _isDisposed, 1, 0) == 0) + DisposeBase(); + } + + /// + protected virtual void DisposeBase() + { + var cancellationTokenSource = _timerPasswordProviderCancellationTokenSource; + if (cancellationTokenSource is not null) { - var cancellationTokenSource = _timerPasswordProviderCancellationTokenSource; - if (cancellationTokenSource is not null) - { - cancellationTokenSource.Cancel(); - cancellationTokenSource.Dispose(); - } + cancellationTokenSource.Cancel(); + cancellationTokenSource.Dispose(); + } - _passwordProviderTimer?.Dispose(); + _periodicPasswordProviderTimer?.Dispose(); + if (MetricsReporter is not null) + { + MetricsReporter.Dispose(); + _eventSourceEvents?.Dispose(); + } - _setupMappingsSemaphore.Dispose(); + // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer + // Due to possible concurrent access, which might lead to deadlock + // See issue #6115 - Clear(); - } + Clear(); } /// - protected override ValueTask DisposeAsyncCore() + protected sealed override ValueTask DisposeAsyncCore() { - // TODO: async Clear, #4499 - Dispose(true); + if (Interlocked.CompareExchange(ref _isDisposed, 1, 0) == 0) + return DisposeAsyncBase(); return default; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private protected void CheckDisposed() + /// + protected virtual async ValueTask DisposeAsyncBase() { - if (_isDisposed == 1) - throw new ObjectDisposedException(GetType().FullName); + var cancellationTokenSource = _timerPasswordProviderCancellationTokenSource; + if (cancellationTokenSource is not null) + { + cancellationTokenSource.Cancel(); 
+ cancellationTokenSource.Dispose(); + } + + if (_periodicPasswordProviderTimer is not null) + await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); + + if (MetricsReporter is not null) + { + MetricsReporter.Dispose(); + _eventSourceEvents?.Dispose(); + } + // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer + // Due to possible concurrent access, which might lead to deadlock + // See issue #6115 + + // TODO: async Clear, #4499 + Clear(); } + private protected void CheckDisposed() + => ObjectDisposedException.ThrowIf(_isDisposed == 1, this); + #endregion - - class DatabaseStateInfo + + sealed class DatabaseStateInfo(DatabaseState state, NpgsqlTimeout timeout, DateTime timeStamp) { - internal readonly DatabaseState State; - internal readonly NpgsqlTimeout Timeout; + internal readonly DatabaseState State = state; + internal readonly NpgsqlTimeout Timeout = timeout; // While the TimeStamp is not strictly required, it does lower the risk of overwriting the current state with an old value - internal readonly DateTime TimeStamp; + internal readonly DateTime TimeStamp = timeStamp; - public DatabaseStateInfo() : this(default, default, default) {} - - public DatabaseStateInfo(DatabaseState state, NpgsqlTimeout timeout, DateTime timeStamp) - => (State, Timeout, TimeStamp) = (state, timeout, timeStamp); + public DatabaseStateInfo() : this(default, default, default) { } } } diff --git a/src/Npgsql/NpgsqlDataSourceBatch.cs b/src/Npgsql/NpgsqlDataSourceBatch.cs index b3235c9d46..c5b44e9ff6 100644 --- a/src/Npgsql/NpgsqlDataSourceBatch.cs +++ b/src/Npgsql/NpgsqlDataSourceBatch.cs @@ -1,5 +1,4 @@ using System; -using System.Data; using System.Data.Common; using System.Threading; using System.Threading.Tasks; @@ -10,7 +9,7 @@ namespace Npgsql; sealed class NpgsqlDataSourceBatch : NpgsqlBatch { internal NpgsqlDataSourceBatch(NpgsqlConnection connection) - : base(new NpgsqlDataSourceCommand(DefaultBatchCommandsSize, connection)) + : 
base(static (conn, batch) => new NpgsqlDataSourceCommand(batch, DefaultBatchCommandsSize, conn), connection) { } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 9cc6eebf8a..2148aec035 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -1,15 +1,15 @@ using System; -using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Net.Security; -using System.Reflection; using System.Security.Cryptography.X509Certificates; +using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.Properties; +using Npgsql.Internal; +using Npgsql.Internal.ResolverFactories; +using Npgsql.NameTranslation; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -18,44 +18,85 @@ namespace Npgsql; /// /// Provides a simple API for configuring and creating an , from which database connections can be obtained. /// -public class NpgsqlDataSourceBuilder : INpgsqlTypeMapper +public sealed class NpgsqlDataSourceBuilder : INpgsqlTypeMapper { - ILoggerFactory? _loggerFactory; - bool _sensitiveDataLoggingEnabled; + static UnsupportedTypeInfoResolver UnsupportedTypeInfoResolver { get; } = new(); - RemoteCertificateValidationCallback? _userCertificateValidationCallback; - Action? _clientCertificatesCallback; + readonly NpgsqlSlimDataSourceBuilder _internalBuilder; - Func>? _periodicPasswordProvider; - TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; - - readonly List _resolverFactories = new(); - readonly Dictionary _userTypeMappings = new(); + /// + /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. + /// + public string? 
Name + { + get => _internalBuilder.Name; + set => _internalBuilder.Name = value; + } /// - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; - - Action? _syncConnectionInitializer; - Func? _asyncConnectionInitializer; + public INpgsqlNameTranslator DefaultNameTranslator + { + get => _internalBuilder.DefaultNameTranslator; + set => _internalBuilder.DefaultNameTranslator = value; + } /// - /// A connection string builder that can be used to configured the connection string on the builder. + /// A connection string builder that can be used to configure the connection string on the builder. /// - public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } + public NpgsqlConnectionStringBuilder ConnectionStringBuilder => _internalBuilder.ConnectionStringBuilder; /// /// Returns the connection string, as currently configured on the builder. /// - public string ConnectionString => ConnectionStringBuilder.ToString(); + public string ConnectionString => _internalBuilder.ConnectionString; + + internal static void ResetGlobalMappings(bool overwrite) + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers([ + overwrite ? new AdoTypeInfoResolverFactory() : AdoTypeInfoResolverFactory.Instance, + new ExtraConversionResolverFactory(), + new JsonTypeInfoResolverFactory(), + new RecordTypeInfoResolverFactory(), + new FullTextSearchTypeInfoResolverFactory(), + new NetworkTypeInfoResolverFactory(), + new GeometricTypeInfoResolverFactory(), + new LTreeTypeInfoResolverFactory(), + new CubeTypeInfoResolverFactory() + ], static () => + { + var builder = new PgTypeInfoResolverChainBuilder(); + builder.EnableRanges(); + builder.EnableMultiranges(); + builder.EnableArrays(); + return builder; + }, overwrite); + + static NpgsqlDataSourceBuilder() + => ResetGlobalMappings(overwrite: false); /// /// Constructs a new , optionally starting out from the given . /// public NpgsqlDataSourceBuilder(string? 
connectionString = null) { - ConnectionStringBuilder = new NpgsqlConnectionStringBuilder(connectionString); - - ResetTypeMappings(); + _internalBuilder = new(new NpgsqlConnectionStringBuilder(connectionString)); + _internalBuilder.ConfigureDefaultFactories = static instance => + { + instance.AppendDefaultFactories(); + instance.AppendResolverFactory(new ExtraConversionResolverFactory()); + instance.AppendResolverFactory(() => new JsonTypeInfoResolverFactory(instance.JsonSerializerOptions)); + instance.AppendResolverFactory(new RecordTypeInfoResolverFactory()); + instance.AppendResolverFactory(new FullTextSearchTypeInfoResolverFactory()); + instance.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); + instance.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); + instance.AppendResolverFactory(new LTreeTypeInfoResolverFactory()); + instance.AppendResolverFactory(new CubeTypeInfoResolverFactory()); + }; + _internalBuilder.ConfigureResolverChain = static chain => chain.Add(UnsupportedTypeInfoResolver); + _internalBuilder.EnableTransportSecurity(); + _internalBuilder.EnableIntegratedSecurity(); + _internalBuilder.EnableRanges(); + _internalBuilder.EnableMultiranges(); + _internalBuilder.EnableArrays(); } /// @@ -65,7 +106,7 @@ public NpgsqlDataSourceBuilder(string? connectionString = null) /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder UseLoggerFactory(ILoggerFactory? loggerFactory) { - _loggerFactory = loggerFactory; + _internalBuilder.UseLoggerFactory(loggerFactory); return this; } @@ -78,7 +119,82 @@ public NpgsqlDataSourceBuilder UseLoggerFactory(ILoggerFactory? loggerFactory) /// The same builder instance so that multiple calls can be chained. 
public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabled = true) { - _sensitiveDataLoggingEnabled = parameterLoggingEnabled; + _internalBuilder.EnableParameterLogging(parameterLoggingEnabled); + return this; + } + + /// + /// Configures type loading options for the DataSource. + /// + public NpgsqlDataSourceBuilder ConfigureTypeLoading(Action configureAction) + { + _internalBuilder.ConfigureTypeLoading(configureAction); + return this; + } + + /// + /// Configures OpenTelemetry tracing options. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder ConfigureTracing(Action configureAction) + { + _internalBuilder.ConfigureTracing(configureAction); + return this; + } + + /// + /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. + /// + /// Options to customize JSON serialization and deserialization. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + { + _internalBuilder.ConfigureJsonOptions(serializerOptions); + return this; + } + + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public NpgsqlDataSourceBuilder EnableDynamicJson(Type[]? 
jsonbClrTypes = null, Type[]? jsonClrTypes = null) + { + _internalBuilder.EnableDynamicJson(jsonbClrTypes, jsonClrTypes); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlDataSourceBuilder EnableRecordsAsTuples() + { + AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlDataSourceBuilder EnableUnmappedTypes() + { + AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); return this; } @@ -99,11 +215,10 @@ public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabl /// /// /// The same builder instance so that multiple calls can be chained. 
- public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback( - RemoteCertificateValidationCallback userCertificateValidationCallback) + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] + public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback(RemoteCertificateValidationCallback userCertificateValidationCallback) { - _userCertificateValidationCallback = userCertificateValidationCallback; - + _internalBuilder.UseUserCertificateValidationCallback(userCertificateValidationCallback); return this; } @@ -112,13 +227,11 @@ public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback( /// /// The client certificate to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseClientCertificate(X509Certificate? clientCertificate) { - if (clientCertificate is null) - return UseClientCertificatesCallback(null); - - var clientCertificates = new X509CertificateCollection { clientCertificate }; - return UseClientCertificates(clientCertificates); + _internalBuilder.UseClientCertificate(clientCertificate); + return this; } /// @@ -126,8 +239,28 @@ public NpgsqlDataSourceBuilder UseClientCertificate(X509Certificate? clientCerti /// /// The client certificate collection to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? clientCertificates) - => UseClientCertificatesCallback(clientCertificates is null ? null : certs => certs.AddRange(clientCertificates)); + { + _internalBuilder.UseClientCertificates(clientCertificates); + return this; + } + + /// + /// When using SSL/TLS, this is a callback that allows customizing SslStream's authentication options. 
+ /// + /// The callback to customize SslStream's authentication options. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseSslClientAuthenticationOptionsCallback(Action? sslClientAuthenticationOptionsCallback) + { + _internalBuilder.UseSslClientAuthenticationOptionsCallback(sslClientAuthenticationOptionsCallback); + return this; + } /// /// Specifies a callback to modify the collection of SSL/TLS client certificates which Npgsql will send to PostgreSQL for @@ -146,10 +279,65 @@ public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? /// /// /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseClientCertificatesCallback(Action? clientCertificatesCallback) { - _clientCertificatesCallback = clientCertificatesCallback; + _internalBuilder.UseClientCertificatesCallback(clientCertificatesCallback); + return this; + } + + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificate. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertificate) + { + _internalBuilder.UseRootCertificate(rootCertificate); + return this; + } + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificates. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseRootCertificates(X509Certificate2Collection? rootCertificates) + { + _internalBuilder.UseRootCertificates(rootCertificates); + return this; + } + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificate. 
+ /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. + /// When that's not the case, use the overload which directly accepts the certificate. + /// + public NpgsqlDataSourceBuilder UseRootCertificateCallback(Func? rootCertificateCallback) + { + _internalBuilder.UseRootCertificateCallback(rootCertificateCallback); + return this; + } + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificates. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. + /// When that's not the case, use the overload which directly accepts the certificate. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseRootCertificatesCallback(Func? 
rootCertificateCallback) + { + _internalBuilder.UseRootCertificatesCallback(rootCertificateCallback); return this; } @@ -178,17 +366,47 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( TimeSpan successRefreshInterval, TimeSpan failureRefreshInterval) { - if (successRefreshInterval < TimeSpan.Zero) - throw new ArgumentException( - string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(successRefreshInterval)), nameof(successRefreshInterval)); - if (failureRefreshInterval < TimeSpan.Zero) - throw new ArgumentException( - string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(failureRefreshInterval)), nameof(failureRefreshInterval)); + _internalBuilder.UsePeriodicPasswordProvider(passwordProvider, successRefreshInterval, failureRefreshInterval); + return this; + } - _periodicPasswordProvider = passwordProvider; - _periodicPasswordSuccessRefreshInterval = successRefreshInterval; - _periodicPasswordFailureRefreshInterval = failureRefreshInterval; + /// + /// Configures a password provider, which is called by the data source when opening connections. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// The same builder instance so that multiple calls can be chained. + /// + /// + /// The provided callback is invoked when opening connections. Therefore its important the callback internally depends on cached + /// data or returns quickly otherwise. Any unnecessary delay will affect connection opening time. + /// + /// + public NpgsqlDataSourceBuilder UsePasswordProvider( + Func? passwordProvider, + Func>? passwordProviderAsync) + { + _internalBuilder.UsePasswordProvider(passwordProvider, passwordProviderAsync); + return this; + } + /// + /// When using Kerberos, this is a callback that allows customizing default settings for Kerberos authentication. 
+ /// + /// The callback containing logic to customize Kerberos authentication settings. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseNegotiateOptionsCallback(Action? negotiateOptionsCallback) + { + _internalBuilder.UseNegotiateOptionsCallback(negotiateOptionsCallback); return this; } @@ -197,105 +415,148 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( #region Type mapping /// - public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) - => _resolverFactories.Insert(0, resolverFactory); + void INpgsqlTypeMapper.AddDbTypeResolverFactory(DbTypeResolverFactory factory) + => ((INpgsqlTypeMapper)_internalBuilder).AddDbTypeResolverFactory(factory); /// - public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - where TEnum : struct, Enum - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); + [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) + => _internalBuilder.AddTypeInfoResolverFactory(factory); - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); + /// + void INpgsqlTypeMapper.Reset() => ((INpgsqlTypeMapper)_internalBuilder).Reset(); - _userTypeMappings[pgName] = new UserEnumTypeMapping(pgName, nameTranslator); + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. 
+ /// + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET enum type to be mapped + public NpgsqlDataSourceBuilder MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + { + _internalBuilder.MapEnum(pgName, nameTranslator); return this; } /// - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - return _userTypeMappings.Remove(pgName); - } + => _internalBuilder.UnmapEnum(pgName, nameTranslator); - /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. 
+ /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public NpgsqlDataSourceBuilder MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(T), nameTranslator); - - _userTypeMappings[pgName] = new UserCompositeTypeMapping(pgName, nameTranslator); + _internalBuilder.MapEnum(clrType, pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - { - var openMethod = typeof(NpgsqlDataSourceBuilder).GetMethod(nameof(MapComposite), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; - var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _internalBuilder.UnmapEnum(clrType, pgName, nameTranslator); + /// + /// Maps a CLR type to a PostgreSQL composite type. 
+ /// + /// + /// CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your members to manually specify a PostgreSQL name. + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. + /// + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET type to be mapped + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public NpgsqlDataSourceBuilder MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + _internalBuilder.MapComposite(typeof(T), pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - => UnmapComposite(typeof(T), pgName, nameTranslator); - - /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(clrType, nameTranslator); + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _internalBuilder.UnmapComposite(typeof(T), pgName, nameTranslator); - return _userTypeMappings.Remove(pgName); - } - - void INpgsqlTypeMapper.Reset() - => ResetTypeMappings(); - - void ResetTypeMappings() + /// + /// Maps a CLR type to a composite type. + /// + /// + /// Maps CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. + /// + /// The .NET type to be mapped. + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public NpgsqlDataSourceBuilder MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - var globalMapper = GlobalTypeMapper.Instance; - globalMapper.Lock.EnterReadLock(); - try - { - _resolverFactories.Clear(); - foreach (var resolverFactory in globalMapper.ResolverFactories) - _resolverFactories.Add(resolverFactory); - - _userTypeMappings.Clear(); - foreach (var kv in globalMapper.UserTypeMappings) - _userTypeMappings[kv.Key] = kv.Value; - } - finally - { - globalMapper.Lock.ExitReadLock(); - } + _internalBuilder.MapComposite(clrType, pgName, nameTranslator); + return this; } - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _internalBuilder.UnmapComposite(clrType, pgName, nameTranslator); #endregion Type mapping @@ -324,12 +585,7 @@ public NpgsqlDataSourceBuilder UsePhysicalConnectionInitializer( Action? connectionInitializer, Func? 
connectionInitializerAsync) { - if (connectionInitializer is null != connectionInitializerAsync is null) - throw new ArgumentException(NpgsqlStrings.SyncAndAsyncConnectionInitializersRequired); - - _syncConnectionInitializer = connectionInitializer; - _asyncConnectionInitializer = connectionInitializerAsync; - + _internalBuilder.UsePhysicalConnectionInitializer(connectionInitializer, connectionInitializerAsync); return this; } @@ -337,68 +593,72 @@ public NpgsqlDataSourceBuilder UsePhysicalConnectionInitializer( /// Builds and returns an which is ready for use. /// public NpgsqlDataSource Build() - { - var config = PrepareConfiguration(); - - if (ConnectionStringBuilder.Host!.Contains(",")) - { - ValidateMultiHost(); - - return new NpgsqlMultiHostDataSource(ConnectionStringBuilder, config); - } - - return ConnectionStringBuilder.Multiplexing - ? new MultiplexingDataSource(ConnectionStringBuilder, config) - : ConnectionStringBuilder.Pooling - ? new PoolingDataSource(ConnectionStringBuilder, config) - : new UnpooledDataSource(ConnectionStringBuilder, config); - } + => _internalBuilder.Build(); /// /// Builds and returns a which is ready for use for load-balancing and failover scenarios. /// public NpgsqlMultiHostDataSource BuildMultiHost() - { - var config = PrepareConfiguration(); + => _internalBuilder.BuildMultiHost(); + + // Used in testing. + internal (NpgsqlConnectionStringBuilder, NpgsqlDataSourceConfiguration) PrepareConfiguration() + => _internalBuilder.PrepareConfiguration(); + + INpgsqlTypeMapper INpgsqlTypeMapper.ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + => ConfigureJsonOptions(serializerOptions); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode( + "Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableDynamicJson(Type[]? jsonbClrTypes, Type[]? jsonClrTypes) + => EnableDynamicJson(jsonbClrTypes, jsonClrTypes); + + [RequiresUnreferencedCode( + "The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableRecordsAsTuples() + => EnableRecordsAsTuples(); + + [RequiresUnreferencedCode( + "The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableUnmappedTypes() + => EnableUnmappedTypes(); - ValidateMultiHost(); + /// + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapEnum(pgName, nameTranslator); + return this; + } - return new(ConnectionStringBuilder, config); + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapEnum(clrType, pgName, nameTranslator); + return this; } - NpgsqlDataSourceConfiguration PrepareConfiguration() + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName, INpgsqlNameTranslator? nameTranslator) { - ConnectionStringBuilder.PostProcessAndValidate(); + _internalBuilder.MapComposite(typeof(T), pgName, nameTranslator); + return this; + } - if (_periodicPasswordProvider is not null && - (ConnectionStringBuilder.Password is not null || ConnectionStringBuilder.Passfile is not null)) - { - throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); - } - - return new( - _loggerFactory is null - ? NpgsqlLoggingConfiguration.NullConfiguration - : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), - _userCertificateValidationCallback, - _clientCertificatesCallback, - _periodicPasswordProvider, - _periodicPasswordSuccessRefreshInterval, - _periodicPasswordFailureRefreshInterval, - _resolverFactories, - _userTypeMappings, - DefaultNameTranslator, - _syncConnectionInitializer, - _asyncConnectionInitializer); - } - - void ValidateMultiHost() - { - if (ConnectionStringBuilder.TargetSessionAttributes is not null) - throw new InvalidOperationException(NpgsqlStrings.CannotSpecifyTargetSessionAttributes); - if (ConnectionStringBuilder.Multiplexing) - throw new NotSupportedException("Multiplexing is not supported with multiple hosts"); - if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) - throw new NotSupportedException("Replication is not supported with multiple hosts"); + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapComposite(clrType, pgName, nameTranslator); + return this; } } diff --git a/src/Npgsql/NpgsqlDataSourceCommand.cs b/src/Npgsql/NpgsqlDataSourceCommand.cs index f6a500592d..d293194f66 100644 --- a/src/Npgsql/NpgsqlDataSourceCommand.cs +++ b/src/Npgsql/NpgsqlDataSourceCommand.cs @@ -15,23 +15,22 @@ internal NpgsqlDataSourceCommand(NpgsqlConnection connection) } // For NpgsqlBatch only - internal NpgsqlDataSourceCommand(int batchCommandCapacity, NpgsqlConnection connection) - : base(batchCommandCapacity, connection) + internal NpgsqlDataSourceCommand(NpgsqlBatch batch, int batchCommandCapacity, NpgsqlConnection connection) + : base(batch, batchCommandCapacity, connection) { } internal override async ValueTask ExecuteReader( - CommandBehavior behavior, - bool async, + bool async, CommandBehavior behavior, CancellationToken cancellationToken) { - await InternalConnection!.Open(async, cancellationToken); + await InternalConnection!.Open(async, cancellationToken).ConfigureAwait(false); try { return await base.ExecuteReader( - behavior | CommandBehavior.CloseConnection, async, + behavior | CommandBehavior.CloseConnection, cancellationToken) .ConfigureAwait(false); } @@ -39,7 +38,7 @@ internal override async ValueTask ExecuteReader( { try { - await InternalConnection.Close(async); + await InternalConnection.Close(async).ConfigureAwait(false); } catch { diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index fb48053500..f3cdd4b513 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ 
b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -1,23 +1,27 @@ using System; using System.Collections.Generic; using System.Net.Security; -using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal; namespace Npgsql; -sealed record NpgsqlDataSourceConfiguration( +sealed record NpgsqlDataSourceConfiguration(string? Name, NpgsqlLoggingConfiguration LoggingConfiguration, - RemoteCertificateValidationCallback? UserCertificateValidationCallback, - Action? ClientCertificatesCallback, + NpgsqlTracingOptions TracingOptions, + NpgsqlTypeLoadingOptions TypeLoading, + TransportSecurityHandler TransportSecurityHandler, + IntegratedSecurityHandler IntegratedSecurityHandler, + Action? SslClientAuthenticationOptionsCallback, + Func? PasswordProvider, + Func>? PasswordProviderAsync, Func>? PeriodicPasswordProvider, TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, - List ResolverFactories, - Dictionary UserTypeMappings, + PgTypeInfoResolverChain ResolverChain, + IEnumerable DbTypeResolverFactories, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, - Func? ConnectionInitializerAsync); + Func? ConnectionInitializerAsync, + Action? 
NegotiateOptionsCallback); diff --git a/src/Npgsql/NpgsqlDiagnostics.cs b/src/Npgsql/NpgsqlDiagnostics.cs new file mode 100644 index 0000000000..0d9ff5f846 --- /dev/null +++ b/src/Npgsql/NpgsqlDiagnostics.cs @@ -0,0 +1,8 @@ +namespace Npgsql; + +static class NpgsqlDiagnostics +{ + public const string ConvertersExperimental = "NPG9001"; + public const string DatabaseInfoExperimental = "NPG9002"; + public const string DbTypeResolverExperimental = "NPG9003"; +} diff --git a/src/Npgsql/NpgsqlEventId.cs b/src/Npgsql/NpgsqlEventId.cs index cf82ea063d..a0bf0bf30c 100644 --- a/src/Npgsql/NpgsqlEventId.cs +++ b/src/Npgsql/NpgsqlEventId.cs @@ -30,7 +30,7 @@ public static class NpgsqlEventId public const int CaughtUserExceptionInNoticeEventHandler = 1901; public const int CaughtUserExceptionInNotificationEventHandler = 1902; public const int ExceptionWhenClosingPhysicalConnection = 1903; - public const int ExceptionWhenOpeningConnectionForMultiplexing = 1904; + public const int ExceptionWhenOpeningConnectionForMultiplexing = 1904; // Multiplexing has been removed #endregion Connection @@ -48,7 +48,7 @@ public static class NpgsqlEventId public const int DerivingParameters = 2500; - public const int ExceptionWhenWritingMultiplexedCommands = 2600; + public const int ExceptionWhenWritingMultiplexedCommands = 2600; // Multiplexing has been removed #endregion Command diff --git a/src/Npgsql/NpgsqlEventSource.cs b/src/Npgsql/NpgsqlEventSource.cs index 43d4a9911b..4122bbd8d5 100644 --- a/src/Npgsql/NpgsqlEventSource.cs +++ b/src/Npgsql/NpgsqlEventSource.cs @@ -1,6 +1,7 @@ using System; -using System.Collections.Generic; +using System.Collections.Concurrent; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Diagnostics.Tracing; using System.Runtime.CompilerServices; @@ -10,13 +11,15 @@ namespace Npgsql; sealed class NpgsqlEventSource : EventSource { public static readonly NpgsqlEventSource Log = new(); + // A static to keep the CWT 
values from making themselves uncollectable if they would have a reference through the + // NpgsqlEventSource instance to the CWT table, which they would if this was an instance field. + static readonly NpgsqlEventSourceDataSources DataSourceEvents = new(Log); const string EventSourceName = "Npgsql"; internal const int CommandStartId = 3; internal const int CommandStopId = 4; -#if !NETSTANDARD2_0 IncrementingPollingCounter? _bytesWrittenPerSecondCounter; IncrementingPollingCounter? _bytesReadPerSecondCounter; @@ -27,12 +30,6 @@ sealed class NpgsqlEventSource : EventSource PollingCounter? _preparedCommandsRatioCounter; PollingCounter? _poolsCounter; - readonly object _dataSourcesLock = new(); - readonly Dictionary _dataSources = new(); - - PollingCounter? _multiplexingAverageCommandsPerBatchCounter; - PollingCounter? _multiplexingAverageWriteTimePerBatchCounter; -#endif long _bytesWritten; long _bytesRead; @@ -42,10 +39,6 @@ sealed class NpgsqlEventSource : EventSource long _currentCommands; long _failedCommands; - long _multiplexingBatchesSent; - long _multiplexingCommandsSent; - long _multiplexingTicksWritten; - internal NpgsqlEventSource() : base(EventSourceName) {} // NOTE @@ -55,77 +48,55 @@ internal NpgsqlEventSource() : base(EventSourceName) {} // https://blogs.msdn.microsoft.com/vancem/2015/09/14/exploring-eventsource-activity-correlation-and-causation-features/ // - A stop event's event id must be next one after its start event. 
- internal void BytesWritten(long bytesWritten) => Interlocked.Add(ref _bytesWritten, bytesWritten); - internal void BytesRead(long bytesRead) => Interlocked.Add(ref _bytesRead, bytesRead); - - public void CommandStart(string sql) + internal void BytesWritten(long bytesWritten) { - Interlocked.Increment(ref _totalCommands); - Interlocked.Increment(ref _currentCommands); - NpgsqlSqlEventSource.Log.CommandStart(sql); + if (IsEnabled()) + Interlocked.Add(ref _bytesWritten, bytesWritten); } - [MethodImpl(MethodImplOptions.NoInlining)] - public void CommandStop() + internal void BytesRead(long bytesRead) { - Interlocked.Decrement(ref _currentCommands); - NpgsqlSqlEventSource.Log.CommandStop(); + if (IsEnabled()) + Interlocked.Add(ref _bytesRead, bytesRead); } - internal void CommandStartPrepared() => Interlocked.Increment(ref _totalPreparedCommands); - - internal void CommandFailed() => Interlocked.Increment(ref _failedCommands); - - internal void DataSourceCreated(NpgsqlDataSource dataSource) + internal void CommandStart(string sql) { -#if !NETSTANDARD2_0 - lock (_dataSourcesLock) + if (IsEnabled()) { - _dataSources.Add(dataSource, null); + Interlocked.Increment(ref _totalCommands); + Interlocked.Increment(ref _currentCommands); } -#endif + NpgsqlSqlEventSource.Log.CommandStart(sql); } - internal void MultiplexingBatchSent(int numCommands, Stopwatch stopwatch) + internal void CommandStop() { - // TODO: CAS loop instead of 3 separate interlocked operations? 
- Interlocked.Increment(ref _multiplexingBatchesSent); - Interlocked.Add(ref _multiplexingCommandsSent, numCommands); - Interlocked.Add(ref _multiplexingTicksWritten, stopwatch.ElapsedTicks); + if (IsEnabled()) + Interlocked.Decrement(ref _currentCommands); + NpgsqlSqlEventSource.Log.CommandStop(); } -#if !NETSTANDARD2_0 - double GetDataSourceCount() + internal void CommandStartPrepared() { - lock (_dataSourcesLock) - { - return _dataSources.Count; - } + if (IsEnabled()) + Interlocked.Increment(ref _totalPreparedCommands); } - double GetMultiplexingAverageCommandsPerBatch() + internal void CommandFailed() { - var batchesSent = Interlocked.Read(ref _multiplexingBatchesSent); - if (batchesSent == 0) - return -1; - - var commandsSent = (double)Interlocked.Read(ref _multiplexingCommandsSent); - return commandsSent / batchesSent; + if (IsEnabled()) + Interlocked.Increment(ref _failedCommands); } - double GetMultiplexingAverageWriteTimePerBatch() - { - var batchesSent = Interlocked.Read(ref _multiplexingBatchesSent); - if (batchesSent == 0) - return -1; + internal bool TryTrackDataSource(string name, NpgsqlDataSource dataSource, [NotNullWhen(true)]out IDisposable? 
untrack) + => DataSourceEvents.TryTrack(name, dataSource, out untrack); - var ticksWritten = (double)Interlocked.Read(ref _multiplexingTicksWritten); - return ticksWritten / batchesSent / 1000; - } + double GetDataSourceCount() => DataSourceEvents.GetDataSourceCount(); protected override void OnEventCommand(EventCommandEventArgs command) { - if (command.Command == EventCommand.Enable) + if (command.Command is EventCommand.Enable) { // Comment taken from RuntimeEventSource in CoreCLR // NOTE: These counters will NOT be disposed on disable command because we may be introducing @@ -180,30 +151,93 @@ protected override void OnEventCommand(EventCommandEventArgs command) DisplayName = "Connection Pools" }; - _multiplexingAverageCommandsPerBatchCounter = new PollingCounter("multiplexing-average-commands-per-batch", this, GetMultiplexingAverageCommandsPerBatch) - { - DisplayName = "Average commands per multiplexing batch" - }; + DataSourceEvents.EnableAll(); + } + } +} + +// This is a separate class to avoid accidentally making the CWT instance reachable through the value. +// The EventSource is stored in the counters, part of the value, so the EventSource *must not* reference this instance on an instance field. +// This goes for any state captured by the value, which is why the other state has its own object for the value to reference. +// See https://github.com/dotnet/runtime/issues/12255. +sealed class NpgsqlEventSourceDataSources(EventSource eventSource) +{ + readonly ConditionalWeakTable> _dataSources = new(); + readonly StrongBox<(int DataSourceCount, ConcurrentDictionary DataSourceNames)> _nonCwtState = new((0, new())); + + internal double GetDataSourceCount() => _nonCwtState.Value.DataSourceCount; + + internal bool TryTrack(string name, NpgsqlDataSource dataSource, [NotNullWhen(true)]out IDisposable? 
untrack) + { + untrack = null; + if (!_nonCwtState.Value.DataSourceNames.TryAdd(name, default)) + return false; + + var lazy = new Lazy( + () => new DataSourceEvents(name: name, dataSource, eventSource, _nonCwtState), + LazyThreadSafetyMode.ExecutionAndPublication); + var tracked = _dataSources.TryAdd(dataSource, lazy); - _multiplexingAverageWriteTimePerBatchCounter = new PollingCounter("multiplexing-average-write-time-per-batch", this, GetMultiplexingAverageWriteTimePerBatch) + if (tracked) + { + Interlocked.Increment(ref _nonCwtState.Value.DataSourceCount); + // We must initialize directly when the event source is already enabled. + if (eventSource.IsEnabled()) + untrack = lazy.Value; + else + untrack = new DataSourceEventsDisposable(lazy); + } + + return tracked; + } + + internal void EnableAll() + { + foreach (var dataSourceKv in _dataSources) + { + _ = dataSourceKv.Value.Value; + } + } + + sealed class DataSourceEventsDisposable(Lazy events) : IDisposable + { + public void Dispose() => events.Value.Dispose(); + } + + sealed class DataSourceEvents : IDisposable + { + readonly string _name; + readonly StrongBox<(int Count, ConcurrentDictionary Names)> _state; + readonly PollingCounter _idleConnections; + readonly PollingCounter _busyConnections; + + int _disposed; + + public DataSourceEvents(string name, NpgsqlDataSource dataSource, EventSource eventSource, StrongBox<(int, ConcurrentDictionary)> state) + { + _name = name; + _state = state; + _idleConnections = new($"idle-connections-{name}", eventSource, () => dataSource.Statistics.Idle) { - DisplayName = "Average write time per multiplexing batch", - DisplayUnits = "us" + DisplayName = $"Idle Connections [{name}]" }; - lock (_dataSourcesLock) + _busyConnections = new($"busy-connections-{name}", eventSource, () => dataSource.Statistics.Busy) { - foreach (var dataSource in _dataSources.Keys) - { - if (!_dataSources[dataSource].HasValue) - { - _dataSources[dataSource] = ( - new PollingCounter($"Idle Connections 
({dataSource.Settings.ToStringWithoutPassword()}])", this, () => dataSource.Statistics.Idle), - new PollingCounter($"Busy Connections ({dataSource.Settings.ToStringWithoutPassword()}])", this, () => dataSource.Statistics.Busy)); - } - } - } + DisplayName = $"Busy Connections [{name}]" + }; } - } -#endif -} \ No newline at end of file + public void Dispose() + { + if (Interlocked.Exchange(ref _disposed, 1) is 1) + return; + + _idleConnections.Dispose(); + _busyConnections.Dispose(); + + Interlocked.Decrement(ref _state.Value.Count); + var success = _state.Value.Names.TryRemove(_name, out _); + Debug.Assert(success); + } + } +} diff --git a/src/Npgsql/NpgsqlException.cs b/src/Npgsql/NpgsqlException.cs index 38b499b438..d437df72b6 100644 --- a/src/Npgsql/NpgsqlException.cs +++ b/src/Npgsql/NpgsqlException.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data.Common; using System.IO; using System.Net.Sockets; @@ -42,26 +42,15 @@ public NpgsqlException(string? message) /// Specifies whether the exception is considered transient, that is, whether retrying the operation could /// succeed (e.g. a network error or a timeout). /// -#if NET5_0_OR_GREATER public override bool IsTransient -#else - public virtual bool IsTransient -#endif => InnerException is IOException or SocketException or TimeoutException or NpgsqlException { IsTransient: true }; -#if NET6_0_OR_GREATER /// + /// This property is null unless in connection string is set to true. public new NpgsqlBatchCommand? BatchCommand { get; set; } /// protected override DbBatchCommand? DbBatchCommand => BatchCommand; -#else - /// - /// If the exception was thrown as a result of executing a , references the within - /// the batch which triggered the exception. Otherwise . - /// - public NpgsqlBatchCommand? BatchCommand { get; set; } -#endif #region Serialization @@ -70,7 +59,8 @@ public virtual bool IsTransient /// /// The SerializationInfo that holds the serialized object data about the exception being thrown. 
/// The StreamingContext that contains contextual information about the source or destination. + [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] protected internal NpgsqlException(SerializationInfo info, StreamingContext context) : base(info, context) {} #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 5efe10e94e..d95e645f70 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -1,7 +1,5 @@ using System; using System.Data.Common; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; namespace Npgsql; @@ -49,7 +47,6 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// public override DbDataAdapter CreateDataAdapter() => new NpgsqlDataAdapter(); -#if !NETSTANDARD2_0 /// /// Specifies whether the specific supports the class. /// @@ -59,9 +56,7 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// Specifies whether the specific supports the class. /// public override bool CanCreateCommandBuilder => true; -#endif -#if NET6_0_OR_GREATER /// public override bool CanCreateBatch => true; @@ -70,13 +65,10 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// public override DbBatchCommand CreateBatchCommand() => new NpgsqlBatchCommand(); -#endif -#if NET7_0_OR_GREATER /// public override DbDataSource CreateDataSource(string connectionString) => NpgsqlDataSource.Create(connectionString); -#endif #region IServiceProvider Members @@ -85,44 +77,7 @@ public override DbDataSource CreateDataSource(string connectionString) /// /// An object that specifies the type of service object to get. /// A service object of type serviceType, or null if there is no service object of type serviceType. - [RequiresUnreferencedCode("Legacy EF5 method, not trimming-safe.")] - public object? 
GetService(Type serviceType) - { - if (serviceType == null) - throw new ArgumentNullException(nameof(serviceType)); - - // In legacy Entity Framework, this is the entry point for obtaining Npgsql's - // implementation of DbProviderServices. We use reflection for all types to - // avoid any dependencies on EF stuff in this project. EF6 (and of course EF Core) do not use this method. - - if (serviceType.FullName != "System.Data.Common.DbProviderServices") - return null; - - // User has requested a legacy EF DbProviderServices implementation. Check our cache first. - if (_legacyEntityFrameworkServices != null) - return _legacyEntityFrameworkServices; - - // First time, attempt to find the EntityFramework5.Npgsql assembly and load the type via reflection - var assemblyName = typeof(NpgsqlFactory).GetTypeInfo().Assembly.GetName(); - assemblyName.Name = "EntityFramework5.Npgsql"; - Assembly npgsqlEfAssembly; - try { - npgsqlEfAssembly = Assembly.Load(new AssemblyName(assemblyName.FullName)); - } catch { - return null; - } - - Type? npgsqlServicesType; - if ((npgsqlServicesType = npgsqlEfAssembly.GetType("Npgsql.NpgsqlServices")) == null || - npgsqlServicesType.GetProperty("Instance") == null) - throw new Exception("EntityFramework5.Npgsql assembly does not seem to contain the correct type!"); - - return _legacyEntityFrameworkServices = npgsqlServicesType - .GetProperty("Instance", BindingFlags.Public | BindingFlags.Static)! - .GetMethod!.Invoke(null, new object[0]); - } - - static object? _legacyEntityFrameworkServices; + public object? 
GetService(Type serviceType) => null; #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlLargeObjectManager.cs b/src/Npgsql/NpgsqlLargeObjectManager.cs deleted file mode 100644 index 4ec6cb002d..0000000000 --- a/src/Npgsql/NpgsqlLargeObjectManager.cs +++ /dev/null @@ -1,256 +0,0 @@ -using Npgsql.Util; -using System; -using System.Data; -using System.Text; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql; - -/// -/// Large object manager. This class can be used to store very large files in a PostgreSQL database. -/// -public class NpgsqlLargeObjectManager -{ - const int InvWrite = 0x00020000; - const int InvRead = 0x00040000; - - internal NpgsqlConnection Connection { get; } - - /// - /// The largest chunk size (in bytes) read and write operations will read/write each roundtrip to the network. Default 4 MB. - /// - public int MaxTransferBlockSize { get; set; } - - /// - /// Creates an NpgsqlLargeObjectManager for this connection. The connection must be opened to perform remote operations. - /// - /// - public NpgsqlLargeObjectManager(NpgsqlConnection connection) - { - Connection = connection; - MaxTransferBlockSize = 4 * 1024 * 1024; // 4MB - } - - /// - /// Execute a function - /// - internal async Task ExecuteFunction(string function, bool async, CancellationToken cancellationToken, params object[] arguments) - { - using var command = Connection.CreateCommand(); - var stringBuilder = new StringBuilder("SELECT * FROM ").Append(function).Append('('); - - for (var i = 0; i < arguments.Length; i++) - { - if (i > 0) - stringBuilder.Append(", "); - stringBuilder.Append('$').Append(i + 1); - command.Parameters.Add(new NpgsqlParameter { Value = arguments[i] }); - } - - stringBuilder.Append(')'); - command.CommandText = stringBuilder.ToString(); - - return (T)(async ? 
await command.ExecuteScalarAsync(cancellationToken) : command.ExecuteScalar())!; - } - - /// - /// Execute a function that returns a byte array - /// - /// - internal async Task ExecuteFunctionGetBytes( - string function, byte[] buffer, int offset, int len, bool async, CancellationToken cancellationToken, params object[] arguments) - { - using var command = Connection.CreateCommand(); - var stringBuilder = new StringBuilder("SELECT * FROM ").Append(function).Append('('); - - for (var i = 0; i < arguments.Length; i++) - { - if (i > 0) - stringBuilder.Append(", "); - stringBuilder.Append('$').Append(i + 1); - command.Parameters.Add(new NpgsqlParameter { Value = arguments[i] }); - } - - stringBuilder.Append(')'); - command.CommandText = stringBuilder.ToString(); - - var reader = async - ? await command.ExecuteReaderAsync(CommandBehavior.SequentialAccess, cancellationToken) - : command.ExecuteReader(CommandBehavior.SequentialAccess); - try - { - if (async) - await reader.ReadAsync(cancellationToken); - else - reader.Read(); - - return (int)reader.GetBytes(0, 0, buffer, offset, len); - } - finally - { - if (async) - await reader.DisposeAsync(); - else - reader.Dispose(); - } - } - - /// - /// Create an empty large object in the database. If an oid is specified but is already in use, an PostgresException will be thrown. - /// - /// A preferred oid, or specify 0 if one should be automatically assigned - /// The oid for the large object created - /// If an oid is already in use - public uint Create(uint preferredOid = 0) => Create(preferredOid, false).GetAwaiter().GetResult(); - - // Review unused parameters - /// - /// Create an empty large object in the database. If an oid is specified but is already in use, an PostgresException will be thrown. - /// - /// A preferred oid, or specify 0 if one should be automatically assigned - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - /// The oid for the large object created - /// If an oid is already in use - public Task CreateAsync(uint preferredOid, CancellationToken cancellationToken = default) - => Create(preferredOid, true, cancellationToken); - - Task Create(uint preferredOid, bool async, CancellationToken cancellationToken = default) - => ExecuteFunction("lo_create", async, cancellationToken, (int)preferredOid); - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// A transaction snapshot is taken by the backend when the object is opened with only read permissions. - /// When reading from this object, the contents reflects the time when the snapshot was taken. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// An NpgsqlLargeObjectStream - public NpgsqlLargeObjectStream OpenRead(uint oid) - => OpenRead(oid, false).GetAwaiter().GetResult(); - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// A transaction snapshot is taken by the backend when the object is opened with only read permissions. - /// When reading from this object, the contents reflects the time when the snapshot was taken. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - /// An NpgsqlLargeObjectStream - public Task OpenReadAsync(uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return OpenRead(oid, true, cancellationToken); - } - - async Task OpenRead(uint oid, bool async, CancellationToken cancellationToken = default) - { - var fd = await ExecuteFunction("lo_open", async, cancellationToken, (int)oid, InvRead); - return new NpgsqlLargeObjectStream(this, fd, false); - } - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// An NpgsqlLargeObjectStream - public NpgsqlLargeObjectStream OpenReadWrite(uint oid) - => OpenReadWrite(oid, false).GetAwaiter().GetResult(); - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - /// An NpgsqlLargeObjectStream - public Task OpenReadWriteAsync(uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return OpenReadWrite(oid, true, cancellationToken); - } - - async Task OpenReadWrite(uint oid, bool async, CancellationToken cancellationToken = default) - { - var fd = await ExecuteFunction("lo_open", async, cancellationToken, (int)oid, InvRead | InvWrite); - return new NpgsqlLargeObjectStream(this, fd, true); - } - - /// - /// Deletes a large object on the backend. - /// - /// Oid of the object to delete - public void Unlink(uint oid) - => ExecuteFunction("lo_unlink", false, CancellationToken.None, (int)oid).GetAwaiter().GetResult(); - - /// - /// Deletes a large object on the backend. 
- /// - /// Oid of the object to delete - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task UnlinkAsync(uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteFunction("lo_unlink", true, cancellationToken, (int)oid); - } - - /// - /// Exports a large object stored in the database to a file on the backend. This requires superuser permissions. - /// - /// Oid of the object to export - /// Path to write the file on the backend - public void ExportRemote(uint oid, string path) - => ExecuteFunction("lo_export", false, CancellationToken.None, (int)oid, path).GetAwaiter().GetResult(); - - /// - /// Exports a large object stored in the database to a file on the backend. This requires superuser permissions. - /// - /// Oid of the object to export - /// Path to write the file on the backend - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task ExportRemoteAsync(uint oid, string path, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteFunction("lo_export", true, cancellationToken, (int)oid, path); - } - - /// - /// Imports a large object to be stored as a large object in the database from a file stored on the backend. This requires superuser permissions. - /// - /// Path to read the file on the backend - /// A preferred oid, or specify 0 if one should be automatically assigned - public void ImportRemote(string path, uint oid = 0) - => ExecuteFunction("lo_import", false, CancellationToken.None, path, (int)oid).GetAwaiter().GetResult(); - - /// - /// Imports a large object to be stored as a large object in the database from a file stored on the backend. This requires superuser permissions. 
- /// - /// Path to read the file on the backend - /// A preferred oid, or specify 0 if one should be automatically assigned - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task ImportRemoteAsync(string path, uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteFunction("lo_import", true, cancellationToken, path, (int)oid); - } - - /// - /// Since PostgreSQL 9.3, large objects larger than 2GB can be handled, up to 4TB. - /// This property returns true whether the PostgreSQL version is >= 9.3. - /// - public bool Has64BitSupport => Connection.PostgreSqlVersion.IsGreaterOrEqual(9, 3); -} \ No newline at end of file diff --git a/src/Npgsql/NpgsqlLargeObjectStream.cs b/src/Npgsql/NpgsqlLargeObjectStream.cs deleted file mode 100644 index 33fe99b5fc..0000000000 --- a/src/Npgsql/NpgsqlLargeObjectStream.cs +++ /dev/null @@ -1,318 +0,0 @@ -using Npgsql.Util; -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql; - -/// -/// An interface to remotely control the seekable stream for an opened large object on a PostgreSQL server. -/// Note that the OpenRead/OpenReadWrite method as well as all operations performed on this stream must be wrapped inside a database transaction. -/// -public sealed class NpgsqlLargeObjectStream : Stream -{ - readonly NpgsqlLargeObjectManager _manager; - readonly int _fd; - long _pos; - readonly bool _writeable; - bool _disposed; - - internal NpgsqlLargeObjectStream(NpgsqlLargeObjectManager manager, int fd, bool writeable) - { - _manager = manager; - _fd = fd; - _pos = 0; - _writeable = writeable; - } - - void CheckDisposed() - { - if (_disposed) - throw new InvalidOperationException("Object disposed"); - } - - /// - /// Since PostgreSQL 9.3, large objects larger than 2GB can be handled, up to 4TB. 
- /// This property returns true whether the PostgreSQL version is >= 9.3. - /// - public bool Has64BitSupport => _manager.Connection.PostgreSqlVersion.IsGreaterOrEqual(9, 3); - - /// - /// Reads count bytes from the large object. The only case when fewer bytes are read is when end of stream is reached. - /// - /// The buffer where read data should be stored. - /// The offset in the buffer where the first byte should be read. - /// The maximum number of bytes that should be read. - /// How many bytes actually read, or 0 if end of file was already reached. - public override int Read(byte[] buffer, int offset, int count) - => Read(buffer, offset, count, false).GetAwaiter().GetResult(); - - /// - /// Reads count bytes from the large object. The only case when fewer bytes are read is when end of stream is reached. - /// - /// The buffer where read data should be stored. - /// The offset in the buffer where the first byte should be read. - /// The maximum number of bytes that should be read. - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - /// How many bytes actually read, or 0 if end of file was already reached. 
- public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return Read(buffer, offset, count, true, cancellationToken); - } - - async Task Read(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentOutOfRangeException(nameof(offset)); - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count)); - if (buffer.Length - offset < count) - throw new ArgumentException("Invalid offset or count for this buffer"); - - CheckDisposed(); - - var chunkCount = Math.Min(count, _manager.MaxTransferBlockSize); - var read = 0; - - while (read < count) - { - var bytesRead = await _manager.ExecuteFunctionGetBytes( - "loread", buffer, offset + read, count - read, async, cancellationToken, _fd, chunkCount); - _pos += bytesRead; - read += bytesRead; - if (bytesRead < chunkCount) - { - return read; - } - } - return read; - } - - /// - /// Writes count bytes to the large object. - /// - /// The buffer to write data from. - /// The offset in the buffer at which to begin copying bytes. - /// The number of bytes to write. - public override void Write(byte[] buffer, int offset, int count) - => Write(buffer, offset, count, false).GetAwaiter().GetResult(); - - /// - /// Writes count bytes to the large object. - /// - /// The buffer to write data from. - /// The offset in the buffer at which to begin copying bytes. - /// The number of bytes to write. - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return Write(buffer, offset, count, true, cancellationToken); - } - - async Task Write(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentOutOfRangeException(nameof(offset)); - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count)); - if (buffer.Length - offset < count) - throw new ArgumentException("Invalid offset or count for this buffer"); - - CheckDisposed(); - - if (!_writeable) - throw new NotSupportedException("Write cannot be called on a stream opened with no write permissions"); - - var totalWritten = 0; - - while (totalWritten < count) - { - var chunkSize = Math.Min(count - totalWritten, _manager.MaxTransferBlockSize); - var bytesWritten = await _manager.ExecuteFunction("lowrite", async, cancellationToken, _fd, new ArraySegment(buffer, offset + totalWritten, chunkSize)); - totalWritten += bytesWritten; - - if (bytesWritten != chunkSize) - throw new InvalidOperationException($"Internal Npgsql bug, please report"); - - _pos += bytesWritten; - } - } - - /// - /// CanTimeout always returns false. - /// - public override bool CanTimeout => false; - - /// - /// CanRead always returns true, unless the stream has been closed. - /// - public override bool CanRead => !_disposed; - - /// - /// CanWrite returns true if the stream was opened with write permissions, and the stream has not been closed. - /// - public override bool CanWrite => _writeable && !_disposed; - - /// - /// CanSeek always returns true, unless the stream has been closed. - /// - public override bool CanSeek => !_disposed; - - /// - /// Returns the current position in the stream. 
Getting the current position does not need a round-trip to the server, however setting the current position does. - /// - public override long Position - { - get - { - CheckDisposed(); - return _pos; - } - set => Seek(value, SeekOrigin.Begin); - } - - /// - /// Gets the length of the large object. This internally seeks to the end of the stream to retrieve the length, and then back again. - /// - public override long Length => GetLength(false).GetAwaiter().GetResult(); - - /// - /// Gets the length of the large object. This internally seeks to the end of the stream to retrieve the length, and then back again. - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task GetLengthAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return GetLength(true); - } - - async Task GetLength(bool async) - { - CheckDisposed(); - var old = _pos; - var retval = await Seek(0, SeekOrigin.End, async); - if (retval != old) - await Seek(old, SeekOrigin.Begin, async); - return retval; - } - - /// - /// Seeks in the stream to the specified position. This requires a round-trip to the backend. - /// - /// A byte offset relative to the origin parameter. - /// A value of type SeekOrigin indicating the reference point used to obtain the new position. - /// - public override long Seek(long offset, SeekOrigin origin) - => Seek(offset, origin, false).GetAwaiter().GetResult(); - - /// - /// Seeks in the stream to the specified position. This requires a round-trip to the backend. - /// - /// A byte offset relative to the origin parameter. - /// A value of type SeekOrigin indicating the reference point used to obtain the new position. - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public Task SeekAsync(long offset, SeekOrigin origin, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Seek(offset, origin, true, cancellationToken); - } - - async Task Seek(long offset, SeekOrigin origin, bool async, CancellationToken cancellationToken = default) - { - if (origin < SeekOrigin.Begin || origin > SeekOrigin.End) - throw new ArgumentException("Invalid origin"); - if (!Has64BitSupport && offset != (int)offset) - throw new ArgumentOutOfRangeException(nameof(offset), "offset must fit in 32 bits for PostgreSQL versions older than 9.3"); - - CheckDisposed(); - - return _manager.Has64BitSupport - ? _pos = await _manager.ExecuteFunction("lo_lseek64", async, cancellationToken, _fd, offset, (int)origin) - : _pos = await _manager.ExecuteFunction("lo_lseek", async, cancellationToken, _fd, (int)offset, (int)origin); - } - - /// - /// Does nothing. - /// - public override void Flush() {} - - /// - /// Truncates or enlarges the large object to the given size. If enlarging, the large object is extended with null bytes. - /// For PostgreSQL versions earlier than 9.3, the value must fit in an Int32. - /// - /// Number of bytes to either truncate or enlarge the large object. - public override void SetLength(long value) - => SetLength(value, false).GetAwaiter().GetResult(); - - /// - /// Truncates or enlarges the large object to the given size. If enlarging, the large object is extended with null bytes. - /// For PostgreSQL versions earlier than 9.3, the value must fit in an Int32. - /// - /// Number of bytes to either truncate or enlarge the large object. - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public Task SetLength(long value, CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - using (NoSynchronizationContextScope.Enter()) - return SetLength(value, true, cancellationToken); - } - - async Task SetLength(long value, bool async, CancellationToken cancellationToken = default) - { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value)); - if (!Has64BitSupport && value != (int)value) - throw new ArgumentOutOfRangeException(nameof(value), "offset must fit in 32 bits for PostgreSQL versions older than 9.3"); - - CheckDisposed(); - - if (!_writeable) - throw new NotSupportedException("SetLength cannot be called on a stream opened with no write permissions"); - - if (_manager.Has64BitSupport) - await _manager.ExecuteFunction("lo_truncate64", async, cancellationToken, _fd, value); - else - await _manager.ExecuteFunction("lo_truncate", async, cancellationToken, _fd, (int)value); - } - - /// - /// Releases resources at the backend allocated for this stream. - /// - public override void Close() - { - if (!_disposed) - { - _manager.ExecuteFunction("lo_close", false, CancellationToken.None, _fd).GetAwaiter().GetResult(); - _disposed = true; - } - } - - /// - /// Releases resources at the backend allocated for this stream, iff disposing is true. - /// - /// Whether to release resources allocated at the backend. 
- protected override void Dispose(bool disposing) - { - if (disposing) - { - Close(); - } - } -} \ No newline at end of file diff --git a/src/Npgsql/NpgsqlLoggingConfiguration.cs b/src/Npgsql/NpgsqlLoggingConfiguration.cs index 745cf476cb..988b8b730e 100644 --- a/src/Npgsql/NpgsqlLoggingConfiguration.cs +++ b/src/Npgsql/NpgsqlLoggingConfiguration.cs @@ -1,4 +1,4 @@ -using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlMetricsOptions.cs b/src/Npgsql/NpgsqlMetricsOptions.cs new file mode 100644 index 0000000000..b0e7332700 --- /dev/null +++ b/src/Npgsql/NpgsqlMetricsOptions.cs @@ -0,0 +1,9 @@ +namespace Npgsql; + +/// +/// Options to configure Npgsql's support for OpenTelemetry metrics. +/// Currently no options are available. +/// +public class NpgsqlMetricsOptions +{ +} diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 6762de9ad4..4e6f42eeae 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -1,10 +1,9 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Util; using System; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -31,14 +30,13 @@ public sealed class NpgsqlMultiHostDataSource : NpgsqlDataSource volatile int _roundRobinIndex = -1; internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) - : base(settings, dataSourceConfig) + : base(settings, dataSourceConfig, reportMetrics: false) { var hosts = settings.Host!.Split(','); _pools = new NpgsqlDataSource[hosts.Length]; for (var i = 0; i < hosts.Length; i++) { var poolSettings = settings.Clone(); - Debug.Assert(!poolSettings.Multiplexing); var host = 
hosts[i].AsSpan().Trim(); if (NpgsqlConnectionStringBuilder.TrySplitHostPort(host, out var newHost, out var newPort)) { @@ -49,16 +47,19 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq poolSettings.Host = host.ToString(); _pools[i] = settings.Pooling - ? new PoolingDataSource(poolSettings, dataSourceConfig, this) + ? new PoolingDataSource(poolSettings, dataSourceConfig) : new UnpooledDataSource(poolSettings, dataSourceConfig); } - var targetSessionAttributeValues = Enum.GetValues(typeof(TargetSessionAttributes)).Cast().ToArray(); - _wrappers = new MultiHostDataSourceWrapper[targetSessionAttributeValues.Max(t => (int)t) + 1]; + var targetSessionAttributeValues = Enum.GetValues(); + var highestValue = 0; + foreach (var value in targetSessionAttributeValues) + if ((int)value > highestValue) + highestValue = (int)value; + + _wrappers = new MultiHostDataSourceWrapper[highestValue + 1]; foreach (var targetSessionAttribute in targetSessionAttributeValues) - { _wrappers[(int)targetSessionAttribute] = new(this, targetSessionAttribute); - } } /// @@ -182,7 +183,7 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) { if (databaseState == DatabaseState.Unknown) { - databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); Debug.Assert(databaseState != DatabaseState.Unknown); if (!stateValidator(databaseState, preferredType)) { @@ -195,7 +196,7 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) } else { - connector = await pool.OpenNewConnector(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + connector = await pool.OpenNewConnector(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); if (connector is not null) { if (databaseState == 
DatabaseState.Unknown) @@ -203,7 +204,7 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) // While opening a new connector we might have refreshed the database state, check again databaseState = pool.GetDatabaseState(); if (databaseState == DatabaseState.Unknown) - databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); Debug.Assert(databaseState != DatabaseState.Unknown); if (!stateValidator(databaseState, preferredType)) { @@ -216,6 +217,12 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) } } } + catch (OperationCanceledException oce) when (cancellationToken.IsCancellationRequested && oce.CancellationToken == cancellationToken) + { + if (connector is not null) + pool.Return(connector); + throw; + } catch (Exception ex) { exceptions.Add(ex); @@ -253,13 +260,13 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) try { - connector = await pool.Get(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + connector = await pool.Get(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); if (databaseState == DatabaseState.Unknown) { // Get might have opened a new physical connection and refreshed the database state, check again databaseState = pool.GetDatabaseState(); if (databaseState == DatabaseState.Unknown) - databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); Debug.Assert(databaseState != DatabaseState.Unknown); if (!stateValidator(databaseState, preferredType)) @@ -298,27 +305,36 @@ internal override async ValueTask Get( var 
preferredType = GetTargetSessionAttributes(conn); var checkUnpreferred = preferredType is TargetSessionAttributes.PreferPrimary or TargetSessionAttributes.PreferStandby; - var connector = await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken) ?? + var connector = await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) ?? (checkUnpreferred ? - await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken) + await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) : null) ?? - await TryGet(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken) ?? + await TryGet(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) ?? (checkUnpreferred ? - await TryGet(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken) + await TryGet(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) : null); return connector ?? throw NoSuitableHostsException(exceptions); } static NpgsqlException NoSuitableHostsException(IList exceptions) - => exceptions.Count == 0 + { + return exceptions.Count == 0 ? new NpgsqlException("No suitable host was found.") - : exceptions[0] is PostgresException firstException && - exceptions.All(x => x is PostgresException ex && ex.SqlState == firstException.SqlState) + : exceptions[0] is PostgresException firstException && AllEqual(firstException, exceptions) ? firstException : new NpgsqlException("Unable to connect to a suitable host. 
Check inner exception for more details.", new AggregateException(exceptions)); + static bool AllEqual(PostgresException first, IList exceptions) + { + foreach (var x in exceptions) + if (x is not PostgresException ex || ex.SqlState != first.SqlState) + return false; + return true; + } + } + int GetRoundRobinIndex() { while (true) @@ -351,7 +367,8 @@ internal override bool TryGetIdleConnector([NotNullWhen(true)] out NpgsqlConnect internal override ValueTask OpenNewConnector(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) => throw new NpgsqlException("Npgsql bug: trying to open a new connector from " + nameof(NpgsqlMultiHostDataSource)); - internal override void Clear() + /// + public override void Clear() { foreach (var pool in _pools) pool.Clear(); @@ -443,6 +460,6 @@ bool TryGetValidConnector(List list, TargetSessionAttributes pr static TargetSessionAttributes GetTargetSessionAttributes(NpgsqlConnection connection) => connection.Settings.TargetSessionAttributesParsed ?? (PostgresEnvironment.TargetSessionAttributes is { } s - ? NpgsqlConnectionStringBuilder.ParseTargetSessionAttributes(s) + ? 
NpgsqlConnectionStringBuilder.ParseTargetSessionAttributes(s.ToLowerInvariant()) : TargetSessionAttributes.Any); } diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index d8e1b927ed..f79942cbf3 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -1,15 +1,15 @@ -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; using System; using System.Collections; using System.Collections.Generic; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.IO; using System.Runtime.CompilerServices; +using Npgsql.BackendMessages; +using Npgsql.Internal.Postgres; namespace Npgsql; @@ -21,7 +21,6 @@ namespace Npgsql; public sealed class NpgsqlNestedDataReader : DbDataReader { readonly NpgsqlDataReader _outermostReader; - ulong _uniqueOutermostReaderRowId; readonly NpgsqlNestedDataReader? _outerNestedReader; NpgsqlNestedDataReader? _cachedFreeNestedDataReader; PostgresCompositeType? 
_compositeType; @@ -31,38 +30,48 @@ public sealed class NpgsqlNestedDataReader : DbDataReader int _nextRowBufferPos; ReaderState _readerState; - readonly List _columns = new(); + readonly List _columns = []; + long _startPos; - readonly struct ColumnInfo + DataFormat DataFormat => DataFormat.Binary; + + readonly struct NestedColumnInfo { - public readonly uint TypeOid; - public readonly int BufferPos; - public readonly NpgsqlTypeHandler TypeHandler; + public PostgresType PostgresType { get; } + public int BufferPos { get; } + public ReadConversionContext LastInfo { get; init; } + public PgConcreteTypeInfo ObjectTypeInfo { get; } + public PgFieldBinding ObjectBinding { get; } - public ColumnInfo(uint typeOid, int bufferPos, NpgsqlTypeHandler typeHandler) + public NestedColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectTypeInfo, DataFormat format) { - TypeOid = typeOid; + PostgresType = postgresType; BufferPos = bufferPos; - TypeHandler = typeHandler; + ObjectTypeInfo = objectTypeInfo.MakeConcreteForField(Field.CreateUnspecified(objectTypeInfo.Options.ToCanonicalTypeId(postgresType))); + if (!ObjectTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(typeof(object), objectTypeInfo.Options, ObjectTypeInfo.PgTypeId, resolved: true); + ObjectBinding = ObjectTypeInfo.BindField(format); } + + public Field Field => Field.CreateUnspecified(ObjectTypeInfo.PgTypeId); } - NpgsqlReadBuffer Buffer => _outermostReader.Buffer; - TypeMapper TypeMapper => _outermostReader.Connector.TypeMapper; + PgReader PgReader => _outermostReader.Buffer.PgReader; + PgSerializerOptions SerializerOptions => _outermostReader.Connector.SerializerOptions; internal NpgsqlNestedDataReader(NpgsqlDataReader outermostReader, NpgsqlNestedDataReader? outerNestedReader, - ulong uniqueOutermostReaderRowId, int depth, PostgresCompositeType? compositeType) + int depth, PostgresCompositeType? 
compositeType) { _outermostReader = outermostReader; _outerNestedReader = outerNestedReader; - _uniqueOutermostReaderRowId = uniqueOutermostReaderRowId; _depth = depth; _compositeType = compositeType; + _startPos = PgReader.GetFieldStartPos(this); } - internal void Init(ulong uniqueOutermostReaderRowId, PostgresCompositeType? compositeType) + internal void Init(PostgresCompositeType? compositeType) { - _uniqueOutermostReaderRowId = uniqueOutermostReaderRowId; + _startPos = PgReader.GetFieldStartPos(this); _columns.Clear(); _numRows = 0; _nextRowIndex = 0; @@ -73,9 +82,9 @@ internal void Init(ulong uniqueOutermostReaderRowId, PostgresCompositeType? comp internal void InitArray() { - var dimensions = Buffer.ReadInt32(); - var containsNulls = Buffer.ReadInt32() == 1; - Buffer.ReadUInt32(); // Element OID. Ignored. + var dimensions = PgReader.ReadInt32(); + var containsNulls = PgReader.ReadInt32() == 1; + PgReader.ReadUInt32(); // Element OID. Ignored. if (containsNulls) throw new InvalidOperationException("Record array contains null record"); @@ -86,19 +95,19 @@ internal void InitArray() if (dimensions != 1) throw new InvalidOperationException("Cannot read a multidimensional array with a nested DbDataReader"); - _numRows = Buffer.ReadInt32(); - Buffer.ReadInt32(); // Lower bound + _numRows = PgReader.ReadInt32(); + PgReader.ReadInt32(); // Lower bound if (_numRows > 0) - Buffer.ReadInt32(); // Length of first row + PgReader.ReadInt32(); // Length of first row - _nextRowBufferPos = Buffer.ReadPosition; + _nextRowBufferPos = PgReader.GetFieldOffset(this); } internal void InitSingleRow() { _numRows = 1; - _nextRowBufferPos = Buffer.ReadPosition; + _nextRowBufferPos = PgReader.GetFieldOffset(this); } /// @@ -140,7 +149,7 @@ public override bool HasRows /// public override bool IsClosed => _readerState == ReaderState.Closed || _readerState == ReaderState.Disposed - || _outermostReader.IsClosed || _uniqueOutermostReaderRowId != _outermostReader.UniqueRowId; + || 
_outermostReader.IsClosed || PgReader.GetFieldStartPos(this) != _startPos; /// public override int RecordsAffected => -1; @@ -173,34 +182,29 @@ public override bool IsClosed /// public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int bufferOffset, int length) { - if (dataOffset < 0 || dataOffset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); + ArgumentOutOfRangeException.ThrowIfNegative(dataOffset); + ArgumentOutOfRangeException.ThrowIfGreaterThan(dataOffset, int.MaxValue); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between {0} and {(buffer.Length)}"); + throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between {0} and {buffer.Length - bufferOffset}"); - - var field = CheckRowAndColumnAndSeek(ordinal); - var handler = field.Handler; - if (!(handler is ByteaHandler)) - throw new InvalidCastException("GetBytes() not supported for type " + field.Handler.PgDisplayName); - - if (field.Length == -1) - throw new InvalidCastException("field is null"); + throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); - var dataOffset2 = (int)dataOffset; - if (dataOffset2 > field.Length) - throw new ArgumentOutOfRangeException(nameof(dataOffset), - $"attempting to read out of bounds from the column data, dataOffset must be between {0} and {field.Length}"); + var columnLen = CheckRowAndColumnAndSeek(ordinal, out var column); + if (columnLen is -1) + ThrowHelper.ThrowInvalidCastException_NoValue(); - Buffer.ReadPosition += dataOffset2; + if (buffer is null) + return columnLen; - length = Math.Min(length, field.Length - dataOffset2); + using var _ = 
PgReader.BeginNestedRead(columnLen, Size.Unknown); - if (buffer == null) - return length; + // Move to offset + PgReader.Seek((int)dataOffset); - return Buffer.Read(new Span(buffer, bufferOffset, length)); + // At offset, read into buffer. + length = Math.Min(length, PgReader.CurrentRemaining); + PgReader.ReadBytes(new Span(buffer, bufferOffset, length)); + return length; } /// public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length) @@ -217,26 +221,26 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int /// A data reader. public new NpgsqlNestedDataReader GetData(int ordinal) { - var field = CheckRowAndColumnAndSeek(ordinal); - var type = field.Handler.PostgresType; + var valueLength = CheckRowAndColumnAndSeek(ordinal, out var column); + var type = column.PostgresType; var isArray = type is PostgresArrayType; var elementType = isArray ? ((PostgresArrayType)type).Element : type; var compositeType = elementType as PostgresCompositeType; if (elementType.InternalName != "record" && compositeType == null) throw new InvalidCastException("GetData() not supported for type " + type.DisplayName); - if (field.Length == -1) + if (valueLength == -1) throw new InvalidCastException("field is null"); var reader = _cachedFreeNestedDataReader; if (reader != null) { _cachedFreeNestedDataReader = null; - reader.Init(_uniqueOutermostReaderRowId, compositeType); + reader.Init(compositeType); } else { - reader = new NpgsqlNestedDataReader(_outermostReader, this, _uniqueOutermostReaderRowId, _depth + 1, compositeType); + reader = new NpgsqlNestedDataReader(_outermostReader, this, _depth + 1, compositeType); } if (isArray) reader.InitArray(); @@ -249,7 +253,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int public override string GetDataTypeName(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.TypeHandler.PgDisplayName; + return column.PostgresType.DisplayName; } /// @@ -285,26 +289,28 @@ public override int GetOrdinal(string name) } /// + [UnconditionalSuppressMessage("ILLink", "IL2093", Justification = "No members are dynamically accessed by Npgsql via NpgsqlNestedDataReader.GetFieldType.")] public override Type GetFieldType(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.TypeHandler.GetFieldType(); + return column.ObjectTypeInfo.Type; } /// public override object GetValue(int ordinal) { - var column = CheckRowAndColumnAndSeek(ordinal); - if (column.Length == -1) + var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); + if (columnLength == -1) return DBNull.Value; - return column.Handler.ReadAsObject(Buffer, column.Length); + + using var _ = PgReader.BeginNestedRead(columnLength, column.ObjectBinding.BufferRequirement); + return column.ObjectTypeInfo.Converter.ReadAsObject(PgReader); } /// public override int GetValues(object[] values) { - if (values == null) - throw new ArgumentNullException(nameof(values)); + ArgumentNullException.ThrowIfNull(values); CheckOnRow(); var count = Math.Min(FieldCount, values.Length); @@ -315,7 +321,7 @@ public override int GetValues(object[] values) /// public override bool IsDBNull(int ordinal) - => CheckRowAndColumnAndSeek(ordinal).Length == -1; + => CheckRowAndColumnAndSeek(ordinal, out _) == -1; /// public override T GetFieldValue(int ordinal) @@ -326,54 +332,23 @@ public override T GetFieldValue(int ordinal) if (typeof(T) == typeof(TextReader)) return (T)(object)GetTextReader(ordinal); - var field = CheckRowAndColumnAndSeek(ordinal); + var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); + var info = GetOrAddConverterInfo(typeof(T), column, ordinal); - if (field.Length == -1) + if (columnLength == -1) { // When T is a Nullable (and only in 
that case), we support returning null - if (NullableHandler.Exists) + if (default(T) is null && typeof(T).IsValueType) return default!; if (typeof(T) == typeof(object)) return (T)(object)DBNull.Value; - throw new InvalidCastException("field is null"); + ThrowHelper.ThrowInvalidCastException_NoValue(); } - return NullableHandler.Exists - ? NullableHandler.Read(field.Handler, Buffer, field.Length, fieldDescription: null) - : typeof(T) == typeof(object) - ? (T)field.Handler.ReadAsObject(Buffer, field.Length, fieldDescription: null) - : field.Handler.Read(Buffer, field.Length, fieldDescription: null); - } - - /// - public override Type GetProviderSpecificFieldType(int ordinal) - { - var column = CheckRowAndColumn(ordinal); - return column.TypeHandler.GetProviderSpecificFieldType(); - } - - /// - public override object GetProviderSpecificValue(int ordinal) - { - var column = CheckRowAndColumnAndSeek(ordinal); - if (column.Length == -1) - return DBNull.Value; - return column.Handler.ReadPsvAsObject(Buffer, column.Length); - } - - /// - public override int GetProviderSpecificValues(object[] values) - { - if (values == null) - throw new ArgumentNullException(nameof(values)); - CheckOnRow(); - - var count = Math.Min(FieldCount, values.Length); - for (var i = 0; i < count; i++) - values[i] = GetProviderSpecificValue(i); - return count; + using var _ = PgReader.BeginNestedRead(columnLength, info.Binding.BufferRequirement); + return info.TypeInfo.Converter.Read(PgReader); } /// @@ -381,7 +356,7 @@ public override bool Read() { CheckResultSet(); - Buffer.ReadPosition = _nextRowBufferPos; + PgReader.Seek(_nextRowBufferPos); if (_nextRowIndex == _numRows) { _readerState = ReaderState.AfterRows; @@ -389,27 +364,38 @@ public override bool Read() } if (_nextRowIndex++ != 0) - Buffer.ReadInt32(); // Length of record + PgReader.ReadInt32(); // Length of record - var numColumns = Buffer.ReadInt32(); + var numColumns = PgReader.ReadInt32(); for (var i = 0; i < numColumns; i++) { - var 
typeOid = Buffer.ReadUInt32(); - var bufferPos = Buffer.ReadPosition; + var typeOid = PgReader.ReadUInt32(); + var bufferPos = PgReader.GetFieldOffset(this); if (i >= _columns.Count) - _columns.Add(new ColumnInfo(typeOid, bufferPos, TypeMapper.ResolveByOID(typeOid))); + { + var pgType = SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); + var pgTypeId = SerializerOptions.ToCanonicalTypeId(pgType); + _columns.Add(new NestedColumnInfo(pgType, bufferPos, + AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), DataFormat)); + } else - _columns[i] = new ColumnInfo(typeOid, bufferPos, - _columns[i].TypeOid == typeOid ? _columns[i].TypeHandler : TypeMapper.ResolveByOID(typeOid)); + { + var pgType = _columns[i].PostgresType.OID == typeOid + ? _columns[i].PostgresType + : SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); + var pgTypeId = SerializerOptions.ToCanonicalTypeId(pgType); + _columns[i] = new NestedColumnInfo(pgType, bufferPos, + AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), DataFormat); + } - var columnLen = Buffer.ReadInt32(); + var columnLen = PgReader.ReadInt32(); if (columnLen >= 0) - Buffer.Skip(columnLen); + PgReader.Consume(columnLen); } _columns.RemoveRange(numColumns, _columns.Count - numColumns); - _nextRowBufferPos = Buffer.ReadPosition; + _nextRowBufferPos = PgReader.GetFieldOffset(this); _readerState = ReaderState.OnRow; return true; @@ -484,7 +470,7 @@ void CheckOnRow() throw new InvalidOperationException("No row is available"); } - ColumnInfo CheckRowAndColumn(int column) + NestedColumnInfo CheckRowAndColumn(int column) { CheckOnRow(); @@ -494,12 +480,30 @@ ColumnInfo CheckRowAndColumn(int column) return _columns[column]; } - (NpgsqlTypeHandler Handler, int Length) CheckRowAndColumnAndSeek(int ordinal) + int CheckRowAndColumnAndSeek(int ordinal, out NestedColumnInfo nestedColumn) { - var column = CheckRowAndColumn(ordinal); - Buffer.ReadPosition = 
column.BufferPos; - var len = Buffer.ReadInt32(); - return (column.TypeHandler, len); + nestedColumn = CheckRowAndColumn(ordinal); + PgReader.Seek(nestedColumn.BufferPos); + return PgReader.ReadInt32(); + } + + ReadConversionContext GetOrAddConverterInfo(Type type, NestedColumnInfo nestedColumn, int ordinal) + { + if (nestedColumn.LastInfo is { IsDefault: false } lastInfo && lastInfo.TypeInfo.Type == type) + return lastInfo; + + var objectInfo = (TypeInfo: nestedColumn.ObjectTypeInfo, Binding: nestedColumn.ObjectBinding); + if (objectInfo.TypeInfo is not null && (typeof(object) == type || objectInfo.TypeInfo.Type == type)) + return new(objectInfo.TypeInfo, objectInfo.Binding); + + var typeId = SerializerOptions.ToCanonicalTypeId(nestedColumn.PostgresType); + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type, typeId, SerializerOptions); + var concreteTypeInfo = typeInfo.MakeConcreteForField(nestedColumn.Field); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, SerializerOptions, typeId, resolved: true); + var columnInfo = new ReadConversionContext(concreteTypeInfo, concreteTypeInfo.BindField(DataFormat)); + _columns[ordinal] = nestedColumn with { LastInfo = columnInfo }; + return columnInfo; } enum ReaderState @@ -511,4 +515,4 @@ enum ReaderState Closed, Disposed } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlNotificationEventArgs.cs b/src/Npgsql/NpgsqlNotificationEventArgs.cs index 454ccdc98a..82e00b18a6 100644 --- a/src/Npgsql/NpgsqlNotificationEventArgs.cs +++ b/src/Npgsql/NpgsqlNotificationEventArgs.cs @@ -26,22 +26,10 @@ public sealed class NpgsqlNotificationEventArgs : EventArgs /// public string Payload { get; } - /// - /// The channel on which the notification was sent. - /// - [Obsolete("Use Channel instead")] - public string Condition => Channel; - - /// - /// An optional payload string that was sent with this notification. 
- /// - [Obsolete("Use Payload instead")] - public string AdditionalInformation => Payload; - internal NpgsqlNotificationEventArgs(NpgsqlReadBuffer buf) { PID = buf.ReadInt32(); Channel = buf.ReadNullTerminatedString(); Payload = buf.ReadNullTerminatedString(); } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlOperationInProgressException.cs b/src/Npgsql/NpgsqlOperationInProgressException.cs index eb7377afcd..052167ced3 100644 --- a/src/Npgsql/NpgsqlOperationInProgressException.cs +++ b/src/Npgsql/NpgsqlOperationInProgressException.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; namespace Npgsql; @@ -16,9 +16,7 @@ public sealed class NpgsqlOperationInProgressException : NpgsqlException /// public NpgsqlOperationInProgressException(NpgsqlCommand command) : base("A command is already in progress: " + command.CommandText) - { - CommandInProgress = command; - } + => CommandInProgress = command; internal NpgsqlOperationInProgressException(ConnectorState state) : base($"The connection is already in state '{state}'") @@ -31,4 +29,4 @@ internal NpgsqlOperationInProgressException(ConnectorState state) /// . /// public NpgsqlCommand? 
CommandInProgress { get; } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 96649f9ccc..135a1be9b7 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -2,16 +2,19 @@ using System.ComponentModel; using System.Data; using System.Data.Common; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.ExceptionServices; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.TypeMapping; using Npgsql.Util; using NpgsqlTypes; -using static Npgsql.Util.Statics; namespace Npgsql; @@ -26,29 +29,28 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable private protected byte _scale; private protected int _size; - // ReSharper disable InconsistentNaming - private protected NpgsqlDbType? _npgsqlDbType; - private protected string? _dataTypeName; - // ReSharper restore InconsistentNaming + internal NpgsqlDbType? _npgsqlDbType; + internal string? _dataTypeName; + internal DbType? _dbType; - private protected string _name = string.Empty; - private protected object? _value; - private protected string _sourceColumn; + private protected string _name = string.Empty; + object? _value; + private protected bool _useSubStream; + private protected Stream? _subStream; + private protected string _sourceColumn; internal string TrimmedName { get; private protected set; } = PositionalName; - internal const string PositionalName = ""; - - /// - /// Can be used to communicate a value from the validation phase to the writing phase. - /// To be used by type handlers only. - /// - public object? ConvertedValue { get; set; } + internal const string PositionalName = ""; - internal NpgsqlLengthCache? LengthCache { get; set; } + IDbTypeResolver? 
_dbTypeResolver; + private protected PgTypeInfo? TypeInfo { get; private set; } + private protected PgConcreteTypeInfo? ConcreteTypeInfo { get; private set; } - internal NpgsqlTypeHandler? Handler { get; set; } + internal PgTypeId PgTypeId => ConcreteTypeInfo?.PgTypeId ?? default; - internal FormatCode FormatCode { get; private set; } + internal DataFormat Format => _binding?.DataFormat ?? DataFormat.Binary; + private protected object? _writeState; + private protected PgValueBinding? _binding; #endregion @@ -249,14 +251,14 @@ public sealed override string ParameterName { if (Collection is not null) Collection.ChangeParameterName(this, value); - else + else ChangeParameterName(value); } } internal void ChangeParameterName(string? value) { - if (value == null) + if (value is null) _name = TrimmedName = PositionalName; else if (value.Length > 0 && (value[0] == ':' || value[0] == '@')) TrimmedName = (_name = value).Substring(1); @@ -277,10 +279,11 @@ public override object? Value get => _value; set { - if (_value == null || value == null || _value.GetType() != value.GetType()) - Handler = null; + if (ShouldResetObjectTypeInfo(value)) + ResetTypeInfo(); + else + DisposeBindingState(); _value = value; - ConvertedValue = null; } } @@ -313,28 +316,32 @@ public sealed override DbType DbType { get { - if (_npgsqlDbType.HasValue) - return GlobalTypeMapper.NpgsqlDbTypeToDbType(_npgsqlDbType.Value); + if (_dbType is { } dbType) + return dbType; if (_dataTypeName is not null) - return GlobalTypeMapper.NpgsqlDbTypeToDbType(GlobalTypeMapper.DataTypeNameToNpgsqlDbType(_dataTypeName)); - - if (Value is not null) // Infer from value but don't cache { - return GlobalTypeMapper.Instance.TryResolveMappingByValue(Value, out var mapping) - ? 
mapping.DbType - : DbType.Object; + var dataTypeName = Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName); + if (TryResolveDbType(dataTypeName, out var resolvedDbType)) + return resolvedDbType; + + return dataTypeName.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; } + if (_npgsqlDbType is { } npgsqlDbType) + return npgsqlDbType.ToDbType(); + + // Infer from value but don't cache + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + if (GetValueType(StaticValueType) is { } valueType) + return GlobalTypeMapper.Instance.FindDataTypeName(valueType, Value)?.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; + return DbType.Object; } set { - Handler = null; - _npgsqlDbType = value == DbType.Object - ? null - : GlobalTypeMapper.DbTypeToNpgsqlDbType(value) - ?? throw new NotSupportedException($"The parameter type DbType.{value} isn't supported by PostgreSQL or Npgsql"); + ResetTypeInfo(); + _dbType = value; } } @@ -347,32 +354,38 @@ public sealed override DbType DbType [DbProviderSpecificTypeProperty(true)] public NpgsqlDbType NpgsqlDbType { - [RequiresUnreferencedCode("The NpgsqlDbType getter isn't trimming-safe")] get { if (_npgsqlDbType.HasValue) return _npgsqlDbType.Value; if (_dataTypeName is not null) - return GlobalTypeMapper.DataTypeNameToNpgsqlDbType(_dataTypeName); + return Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName).ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; - if (Value is not null) // Infer from value + var valueType = GetValueType(StaticValueType); + if (_dbType is { } dbType) { - return GlobalTypeMapper.Instance.TryResolveMappingByValue(Value, out var mapping) - ? mapping.NpgsqlDbType ?? NpgsqlDbType.Unknown - : throw new NotSupportedException("Can't infer NpgsqlDbType for type " + Value.GetType()); + if (TryResolveDbTypeDataTypeName(dbType, valueType, out var dataTypeName)) + return NpgsqlDbTypeExtensions.ToNpgsqlDbType(dataTypeName) ?? 
NpgsqlDbType.Unknown; + + return dbType.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; } + // Infer from value but don't cache + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + if (valueType is not null) + return GlobalTypeMapper.Instance.FindDataTypeName(valueType, Value)?.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; + return NpgsqlDbType.Unknown; } set { if (value == NpgsqlDbType.Array) - throw new ArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Array, Binary-Or with the element type (e.g. Array of Box is NpgsqlDbType.Array | NpgsqlDbType.Box)."); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Array, Binary-Or with the element type (e.g. Array of Box is NpgsqlDbType.Array | NpgsqlDbType.Box)."); if (value == NpgsqlDbType.Range) - throw new ArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Range, Binary-Or with the element type (e.g. Range of integer is NpgsqlDbType.Range | NpgsqlDbType.Integer)"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Range, Binary-Or with the element type (e.g. Range of integer is NpgsqlDbType.Range | NpgsqlDbType.Integer)"); - Handler = null; + ResetTypeInfo(); _npgsqlDbType = value; } } @@ -387,22 +400,36 @@ public string? DataTypeName if (_dataTypeName != null) return _dataTypeName; - if (_npgsqlDbType.HasValue) - return GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(_npgsqlDbType.Value); + // Map it to a display name. + if (_npgsqlDbType is { } npgsqlDbType) + { + var unqualifiedName = npgsqlDbType.ToUnqualifiedDataTypeName(); + return unqualifiedName is null ? null : Internal.Postgres.DataTypeName.ValidatedName( + "pg_catalog." 
+ unqualifiedName).UnqualifiedDisplayName; + } - if (Value != null) // Infer from value + var valueType = GetValueType(StaticValueType); + if (_dbType is { } dbType) { - return GlobalTypeMapper.Instance.TryResolveMappingByValue(Value, out var mapping) - ? mapping.DataTypeName - : null; + if (TryResolveDbTypeDataTypeName(dbType, valueType, out var dataTypeName)) + return dataTypeName; + + var unqualifiedName = dbType.ToNpgsqlDbType()?.ToUnqualifiedDataTypeName(); + return unqualifiedName is null ? null : Internal.Postgres.DataTypeName.ValidatedName( + "pg_catalog." + unqualifiedName).UnqualifiedDisplayName; } + // Infer from value but don't cache + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + if (valueType is not null) + return GlobalTypeMapper.Instance.FindDataTypeName(valueType, Value)?.DisplayName; + return null; } set { + ResetTypeInfo(); _dataTypeName = value; - Handler = null; } } @@ -418,7 +445,6 @@ public string? DataTypeName [Category("Data")] public sealed override ParameterDirection Direction { get; set; } -#pragma warning disable CS0109 /// /// Gets or sets the maximum number of digits used to represent the property. /// @@ -430,11 +456,7 @@ public string? DataTypeName public new byte Precision { get => _precision; - set - { - _precision = value; - Handler = null; - } + set => _precision = value; } /// @@ -446,13 +468,8 @@ public string? DataTypeName public new byte Scale { get => _scale; - set - { - _scale = value; - Handler = null; - } + set => _scale = value; } -#pragma warning restore CS0109 /// [DefaultValue(0)] @@ -463,10 +480,10 @@ public sealed override int Size set { if (value < -1) - throw new ArgumentException($"Invalid parameter Size value '{value}'. The value must be greater than or equal to 0."); + ThrowHelper.ThrowArgumentException($"Invalid parameter Size value '{value}'. 
The value must be greater than or equal to 0."); + DisposeBindingState(); _size = value; - Handler = null; } } @@ -486,12 +503,10 @@ public sealed override string SourceColumn /// public sealed override bool SourceColumnNullMapping { get; set; } -#pragma warning disable CA2227 /// /// The collection to which this parameter belongs, if any. /// public NpgsqlParameterCollection? Collection { get; set; } -#pragma warning restore CA2227 /// /// The PostgreSQL data type, such as int4 or text, as discovered from pg_type. @@ -505,52 +520,494 @@ public sealed override string SourceColumn #region Internals - internal virtual void ResolveHandler(TypeMapper typeMapper) + private protected virtual Type StaticValueType => typeof(object); + + Type? GetValueType(Type staticValueType) => staticValueType != typeof(object) ? staticValueType : Value?.GetType(); + + bool TryResolveDbType(DataTypeName dataTypeName, out DbType dbType) { - if (Handler is not null) - return; + if (_dbTypeResolver?.GetDbType(dataTypeName) is { } result) + { + dbType = result; + return true; + } + + dbType = default; + return false; + } - if (_npgsqlDbType.HasValue) - Handler = typeMapper.ResolveByNpgsqlDbType(_npgsqlDbType.Value); - else if (_dataTypeName is not null) - Handler = typeMapper.ResolveByDataTypeName(_dataTypeName); - else if (_value is not null) - Handler = typeMapper.ResolveByValue(_value); + bool TryResolveDbTypeDataTypeName(DbType dbType, Type? type, [NotNullWhen(true)]out string? normalizedDataTypeName) + { + if (_dbTypeResolver?.GetDataTypeName(dbType, type) is { } result) + { + normalizedDataTypeName = Internal.Postgres.DataTypeName.NormalizeName(result); + return true; + } + + normalizedDataTypeName = null; + return false; + } + + internal void SetOutputValue(NpgsqlDataReader reader, int ordinal) + { + // Set Value (not _value) so we also support object typed generic params. 
+ if (StaticValueType == typeof(object)) + Value = reader.GetValue(ordinal); else + SetOutputTypedValue(reader, ordinal); + } + + internal bool ShouldResetObjectTypeInfo(object? value) + { + var currentType = TypeInfo?.Type; + if (currentType is null || value is null) + return false; + + var valueType = value.GetType(); + // We don't want to reset the type info when the value is a DBNull, we're able to write it out with any type info. + return valueType != typeof(DBNull) && currentType != valueType; + } + + internal void GetResolutionInfo(out PgTypeInfo? typeInfo, out PgConcreteTypeInfo? concreteTypeInfo) + { + typeInfo = TypeInfo; + concreteTypeInfo = ConcreteTypeInfo; + } + + internal void SetResolutionInfo(PgTypeInfo typeInfo, PgConcreteTypeInfo concreteTypeInfo) + { + if (_binding is not null) + DisposeBindingState(); + + // Dispose any provider-produced _writeState against its current ConcreteTypeInfo before we + // overwrite it — once reassigned, the restored ConcreteTypeInfo can't dispose state produced + // by the about-to-be-discarded one. + if (_writeState is { } ws) { - var parameterName = !string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}"; - throw new InvalidOperationException($"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set"); + ConcreteTypeInfo?.DisposeWriteState(ws); + _writeState = null; } + + TypeInfo = typeInfo; + ConcreteTypeInfo = concreteTypeInfo; } - internal void Bind(TypeMapper typeMapper) + /// Attempt to resolve a type info based on available (postgres) type information on the parameter. + /// When is false (e.g. SchemaOnly), any provider-produced write state is + /// disposed immediately because no Bind call will follow to take ownership of it. + internal void ResolveTypeInfo(PgSerializerOptions options, IDbTypeResolver? dbTypeResolver, bool willBind = true) { - ResolveHandler(typeMapper); - FormatCode = Handler!.PreferTextWrite ? 
FormatCode.Text : FormatCode.Binary; + var typeInfo = TypeInfo; + var staticValueType = StaticValueType; + var previouslyResolved = ReferenceEquals(typeInfo?.Options, options); + if (!previouslyResolved) + { + var valueType = GetValueType(staticValueType); + + string? dataTypeName = null; + if (_dataTypeName is not null) + { + dataTypeName = Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName); + } + else if (_npgsqlDbType is { } npgsqlDbType) + { + dataTypeName = npgsqlDbType.ToDataTypeName() ?? npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow(); + } + else if (_dbType is { } dbType) + { + if (dbTypeResolver is not null) + { + _dbTypeResolver = dbTypeResolver; + if (dbTypeResolver.GetDataTypeName(dbType, valueType) is { } result) + { + dataTypeName = Internal.Postgres.DataTypeName.NormalizeName(result); + } + } + + // Fall back to builtin mappings if there was no resolver, or it didn't produce a result. + if (dataTypeName is null) + { + dataTypeName = dbType.ToNpgsqlDbType()?.ToDataTypeName(); + // If DbType.Object was specified we will only throw (see ThrowNoTypeInfo) if valueType is also null. + if (dataTypeName is null && dbType is not DbType.Object) + ThrowDbTypeNotSupported(); + } + } + + PgTypeId? pgTypeId = null; + if (dataTypeName is not null) + { + if (!options.DatabaseInfo.TryGetPostgresTypeByName(dataTypeName, out var pgType)) + { + ThrowNotSupported(dataTypeName); + return; + } + + pgTypeId = options.ToCanonicalTypeId(pgType.GetRepresentationalType()); + } + + if (pgTypeId is null && valueType is null) + { + ThrowNoTypeInfo(); + return; + } + + // We treat object typed DBNull values as default info (we don't supply a type). + // Unless we don't have a pgTypeId either, at which point we'll use an 'unspecified' PgTypeInfo to help us write a NULL. + if (valueType == typeof(DBNull) && staticValueType == typeof(object)) + { + TypeInfo = typeInfo = pgTypeId is null + ? 
options.UnspecifiedDBNullTypeInfo + : AdoSerializerHelpers.GetTypeInfoForWriting(type: null, pgTypeId, options, _npgsqlDbType); + } + else + { + TypeInfo = typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); + } + } + + // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. + // We never reuse concrete type infos from providers across executions as a mutable value itself may influence the result. + // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. + if (!previouslyResolved || typeInfo is not PgConcreteTypeInfo) + { + Debug.Assert(typeInfo is not null); + DisposeBindingState(); + + // Dispose any stale _writeState from a previous resolution against its current + // ConcreteTypeInfo before the branches below overwrite it — this covers the "failed resolution, + // caller fixed the value, called again" self-heal path (e.g. NpgsqlBinaryImporter's PgTypeId check). + if (_writeState is { } staleWs) + { + ConcreteTypeInfo?.DisposeWriteState(staleWs); + _writeState = null; + } + + if (staticValueType == typeof(object)) + { + // Pull from Value (not _value) so we also support object typed generic params. + var value = Value; + ConcreteTypeInfo = typeInfo.MakeConcreteForValueAsObject(value is DBNull ? null : value, out _writeState); + } + else + { + ConcreteTypeInfo = MakeConcreteTypeInfoForTypedValue(typeInfo); + } + + // If no Bind follows (SchemaOnly), release the provider-produced state immediately so + // lifecycle stays contained inside the parameter. 
+ if ((!willBind || !ConcreteTypeInfo.SupportsWriting) && _writeState is { } ws) + { + ConcreteTypeInfo.DisposeWriteState(ws); + _writeState = null; + } + + if (!ConcreteTypeInfo.SupportsWriting) + AdoSerializerHelpers.ThrowWritingNotSupported(GetValueType(staticValueType), options, ConcreteTypeInfo.PgTypeId, _npgsqlDbType, ParameterName, resolved: true); + } + + void ThrowNoTypeInfo() + => ThrowHelper.ThrowInvalidOperationException( + $"Parameter '{(!string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}")}' must have either its DbType, NpgsqlDbType, DataTypeName or its Value set."); + + void ThrowDbTypeNotSupported() + => ThrowHelper.ThrowNotSupportedException( + $"The DbType '{_dbType}' isn't supported by Npgsql. There might be an Npgsql plugin with support for this DbType."); + + void ThrowNotSupported(string dataTypeName) + => ThrowHelper.ThrowNotSupportedException( + $"The data type name '{dataTypeName}'{(_npgsqlDbType is not null ? $", provided as NpgsqlDbType '{_npgsqlDbType}'," : null)} could not be found in the types that were loaded by Npgsql. " + + $"Your database details or Npgsql type loading configuration may be incorrect. Alternatively your PostgreSQL installation might need to be upgraded, or an extension adding the missing data type might not have been installed."); } - internal virtual int ValidateAndGetLength() + /// Bind the current value to the type info, truncate (if applicable), take its size, and do any final validation before writing. + internal void Bind(out DataFormat format, out Size size, DataFormat? 
requiredFormat = null) { - if (_value is DBNull) - return 0; - if (_value == null) - throw new InvalidCastException($"Parameter {ParameterName} must be set"); + if (TypeInfo is null || ConcreteTypeInfo is null) + ThrowHelper.ThrowInvalidOperationException($"Missing type info, {nameof(ResolveTypeInfo)} needs to be called before {nameof(Bind)}."); + + // We might call this twice, once during validation and once during WriteBind, only compute things once. + // Bind is atomic *and* self-cleaning: the local binding is only committed to _binding + // (and _writeState nulled) after every check passes, and any exception before commit disposes + // the resolution-time _writeState ourselves so callers don't need to know about it. + if (_binding is null) + { + if (_size > 0) + HandleSizeTruncation(ConcreteTypeInfo); + + try + { + PgValueBinding binding; + if (_useSubStream) + { + binding = BindSubStream(); + } + else if (StaticValueType == typeof(object)) + { + // Pull from Value so we also support object typed generic params. + var value = Value; + if (value is null) + ThrowHelper.ThrowInvalidOperationException($"Parameter '{ParameterName}' cannot be null, DBNull.Value should be used instead."); + + binding = ConcreteTypeInfo.BindParameterObjectValue(value, _writeState, requiredFormat); + } + else + { + binding = BindTypedValue(ConcreteTypeInfo, formatPreference: requiredFormat); + } + + // Enforce that provider-produced _writeState flows end-to-end through the binding unchanged. + // A converter that accepts _writeState as input must thread the same instance into its returned + // binding's WriteState. Swapping to a different instance is a contract violation because it + // forks the lifecycle (the resolution-time state would be orphaned and the bind-time state + // would be unowned by this parameter). 
+ if (_writeState is not null && !ReferenceEquals(_writeState, binding.WriteState)) + ThrowHelper.ThrowInvalidOperationException( + $"Binding for parameter '{ParameterName}' replaced the provider-produced write state with a different instance. " + + "Converters must thread the write state through unchanged."); + + if (requiredFormat is not null && binding.DataFormat != requiredFormat) + ThrowHelper.ThrowNotSupportedException($"Parameter '{ParameterName}' must be written in {requiredFormat} format, but does not support this format."); + + // Binding and ownership transfer of state happen together. + _binding = binding; + _writeState = null; + } + catch + { + if (_writeState is { } ws) + { + ConcreteTypeInfo.DisposeWriteState(ws); + _writeState = null; + } + if (_subStream is not null) + { + _subStream.Dispose(); + _subStream = null; + } + _useSubStream = false; + throw; + } + } + else if (requiredFormat is not null && _binding.GetValueOrDefault().DataFormat != requiredFormat) + { + ThrowHelper.ThrowNotSupportedException($"Parameter '{ParameterName}' must be written in {requiredFormat} format, but does not support this format."); + } - var lengthCache = LengthCache; - var len = Handler!.ValidateObjectAndGetLength(_value, ref lengthCache, this); - LengthCache = lengthCache; - return len; + format = Format; + size = _binding.GetValueOrDefault().Size ?? -1; + + [MethodImpl(MethodImplOptions.NoInlining)] + PgValueBinding BindSubStream() + { + // Pull from Value so we also support object typed generic params. + var stream = (Stream?)Value; + Debug.Assert(stream is not null, "_useSubStream should only be true if we had a value during HandleSizeTruncation"); + int subSize; + if (stream.CanSeek) + { + var remaining = Math.Max(0, stream.Length - stream.Position); + subSize = remaining < _size ? (int)remaining : _size; + _subStream = new SubReadStream(stream, _size); + } + else + { + // TODO maybe we can move this IO. 
+ var buffer = new byte[_size]; + var read = stream.ReadAtLeast(buffer, _size, throwOnEndOfStream: false); + subSize = Math.Min(_size, read); + _subStream = new MemoryStream(buffer, 0, subSize); + } + return new(DataFormat.Binary, 0, subSize, null); + } + + // Handle Size truncate behavior for a predetermined set of types and pg types. + // Doesn't matter if we 'box' Value, all supported types are reference types. + [MethodImpl(MethodImplOptions.NoInlining)] + void HandleSizeTruncation(PgConcreteTypeInfo typeInfo) + { + var type = typeInfo.Type; + if ((type != typeof(string) && type != typeof(char[]) && type != typeof(byte[]) && !type.IsAssignableTo(typeof(Stream))) || Value is not { } value) + return; + + var dataTypeName = typeInfo.Options.GetDataTypeName(PgTypeId); + if (dataTypeName == DataTypeNames.Text || dataTypeName == DataTypeNames.Varchar || dataTypeName == DataTypeNames.Bpchar) + { + if (value is string s && s.Length > _size) + Value = s.Substring(0, _size); + else if (value is char[] chars && chars.Length > _size) + { + var truncated = new char[_size]; + Array.Copy(chars, truncated, _size); + Value = truncated; + } + } + else if (dataTypeName == DataTypeNames.Bytea) + { + if (value is byte[] bytes && bytes.Length > _size) + { + var truncated = new byte[_size]; + Array.Copy(bytes, truncated, _size); + Value = truncated; + } + else if (value is Stream) + { + // Substream path abandons the resolver-produced state, we must dispose it here to prevent the no swap exception. 
+ if (_writeState is { } ws) + { + typeInfo.DisposeWriteState(ws); + _writeState = null; + } + _useSubStream = true; + } + } + } } - internal virtual Task WriteWithLength(NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - => Handler!.WriteObjectWithLength(_value!, buf, LengthCache, this, async, cancellationToken); + internal async ValueTask Write(bool async, PgWriter writer, CancellationToken cancellationToken) + { + if (_binding is not { } binding) + { + ThrowHelper.ThrowInvalidOperationException("Missing type info or binding info."); + return; + } + Debug.Assert(ConcreteTypeInfo is not null); + + try + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var size = binding.Size?.Value ?? -1; + writer.WriteInt32(size); + writer.CommitAndResetTotal(sizeof(int)); + + if (!binding.IsDbNullBinding) + { + if (_useSubStream) + { + Debug.Assert(_subStream is not null); + if (async) + await _subStream.CopyToAsync(writer.GetStream(), cancellationToken).ConfigureAwait(false); + else + _subStream.CopyTo(writer.GetStream()); + writer.CommitAndResetTotal(size); + } + else + { + await writer.StartWrite(async, binding, cancellationToken).ConfigureAwait(false); + var typeInfo = ConcreteTypeInfo; + if (StaticValueType == typeof(object)) + { + // Pull from Value so we also support object typed generic params. 
+ var value = Value; + Debug.Assert(value is not null); + if (async) + { + await typeInfo.Converter.WriteAsObjectAsync(writer, value, cancellationToken).ConfigureAwait(false); + } + else + { + typeInfo.Converter.WriteAsObject(writer, value); + } + } + else + { + await WriteTypedValue(async, typeInfo, writer, cancellationToken).ConfigureAwait(false); + } + writer.EndWrite(size); + } + } + } + finally + { + DisposeBindingState(); + } + } + + private protected virtual PgConcreteTypeInfo MakeConcreteTypeInfoForTypedValue(PgTypeInfo typeInfo) + => throw new NotSupportedException(); + + private protected virtual PgValueBinding BindTypedValue(PgConcreteTypeInfo typeInfo, DataFormat? formatPreference) + => throw new NotSupportedException(); + + private protected virtual ValueTask WriteTypedValue(bool async, PgConcreteTypeInfo typeInfo, PgWriter writer, CancellationToken cancellationToken) + => throw new NotSupportedException(); + + private protected virtual void SetOutputTypedValue(NpgsqlDataReader reader, int ordinal) + => throw new NotSupportedException(); /// public override void ResetDbType() { + _dbType = null; _npgsqlDbType = null; _dataTypeName = null; - Handler = null; + ResetTypeInfo(); + } + + private protected void ResetTypeInfo() + { + DisposeBindingState(); + + // Dispose any provider-produced _writeState as well. + if (_writeState is { } ws) + { + ConcreteTypeInfo?.DisposeWriteState(ws); + _writeState = null; + } + + TypeInfo = null; + ConcreteTypeInfo = null; + } + + private protected void DisposeBindingState() + { + try + { + if (_binding is not { } binding) + { + Debug.Assert(!_useSubStream && _subStream is null); + return; + } + + // Dispose write state first as it may hold a reference to _subStream. + Debug.Assert(ConcreteTypeInfo is not null); + Exception? 
disposalException = null; + if (binding.WriteState is { } writeState) + { + try + { + ConcreteTypeInfo.DisposeWriteState(writeState); + } + catch (Exception ex) + { + disposalException = ex; + } + } + + if (_useSubStream) + { + Debug.Assert(_subStream is not null); + try + { + _subStream.Dispose(); + } + catch (Exception ex) when (disposalException is not null) + { + throw new AggregateException(disposalException, ex); + } + } + + if (disposalException is not null) + ExceptionDispatchInfo.Throw(disposalException); + } + finally + { + _useSubStream = false; + _subStream = null; + _binding = null; + } } internal bool IsInputDirection => Direction == ParameterDirection.InputOutput || Direction == ParameterDirection.Input; @@ -575,6 +1032,7 @@ private protected virtual NpgsqlParameter CloneCore() => _precision = _precision, _scale = _scale, _size = _size, + _dbType = _dbType, _npgsqlDbType = _npgsqlDbType, _dataTypeName = _dataTypeName, Direction = Direction, @@ -590,4 +1048,4 @@ private protected virtual NpgsqlParameter CloneCore() => object ICloneable.Clone() => Clone(); #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 89a66244af..917eb9311b 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -5,7 +5,6 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using Npgsql.TypeMapping; using NpgsqlTypes; namespace Npgsql; @@ -36,7 +35,7 @@ static NpgsqlParameterCollection() /// /// Initializes a new instance of the NpgsqlParameterCollection class. /// - internal NpgsqlParameterCollection() {} + internal NpgsqlParameterCollection() { } bool LookupEnabled => InternalList.Count >= LookupThreshold; @@ -143,7 +142,7 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? 
value) var oldTrimmedName = parameter.TrimmedName; parameter.ChangeParameterName(value); - if (_caseInsensitiveLookup is null || _caseInsensitiveLookup.Count == 0) + if (_caseInsensitiveLookup is null) return; var index = IndexOf(parameter); @@ -166,28 +165,25 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) { get { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); var index = IndexOf(parameterName); if (index == -1) - throw new ArgumentException("Parameter not found"); + ThrowHelper.ThrowArgumentException("Parameter not found"); return InternalList[index]; } set { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); - if (value is null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(parameterName); + ArgumentNullException.ThrowIfNull(value); var index = IndexOf(parameterName); if (index == -1) - throw new ArgumentException("Parameter not found"); + ThrowHelper.ThrowArgumentException("Parameter not found"); if (!string.Equals(parameterName, value.TrimmedName, StringComparison.OrdinalIgnoreCase)) - throw new ArgumentException("Parameter name must be a case-insensitive match with the property 'ParameterName' on the given NpgsqlParameter", nameof(parameterName)); + ThrowHelper.ThrowArgumentException("Parameter name must be a case-insensitive match with the property 'ParameterName' on the given NpgsqlParameter", nameof(parameterName)); var oldValue = InternalList[index]; LookupChangeName(value, oldValue.ParameterName, oldValue.TrimmedName, index); @@ -206,10 +202,9 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? 
value) get => InternalList[index]; set { - if (value is null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); if (value.Collection is not null) - throw new InvalidOperationException("The parameter already belongs to a collection"); + ThrowHelper.ThrowInvalidOperationException("The parameter already belongs to a collection"); var oldValue = InternalList[index]; @@ -228,13 +223,12 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) /// Adds the specified object to the . /// /// The to add to the collection. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(NpgsqlParameter value) { - if (value is null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); if (value.Collection is not null) - throw new InvalidOperationException("The parameter already belongs to a collection"); + ThrowHelper.ThrowInvalidOperationException("The parameter already belongs to a collection"); InternalList.Add(value); value.Collection = this; @@ -315,7 +309,7 @@ public NpgsqlParameter AddWithValue(NpgsqlDbType parameterType, object value) /// /// The name of the parameter. /// One of the values. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType) => Add(new NpgsqlParameter(parameterName, parameterType)); @@ -326,7 +320,7 @@ public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType) /// The name of the parameter. /// One of the values. /// The length of the column. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType, int size) => Add(new NpgsqlParameter(parameterName, parameterType, size)); @@ -338,7 +332,7 @@ public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType, int /// One of the values. 
/// The length of the column. /// The name of the source column. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType, int size, string sourceColumn) => Add(new NpgsqlParameter(parameterName, parameterType, size, sourceColumn)); @@ -404,7 +398,7 @@ public override int IndexOf(string parameterName) void BuildLookup() { if (TwoPassCompatMode) - _caseSensitiveLookup = new Dictionary(InternalList.Count, StringComparer.Ordinal); + _caseSensitiveLookup = new Dictionary(InternalList.Count); _caseInsensitiveLookup = new Dictionary(InternalList.Count, StringComparer.OrdinalIgnoreCase); @@ -430,28 +424,34 @@ void BuildLookup() /// The zero-based index of the parameter. public override void RemoveAt(int index) { - if (InternalList.Count - 1 < index) - throw new ArgumentOutOfRangeException(nameof(index)); + ArgumentOutOfRangeException.ThrowIfGreaterThanOrEqual(index, InternalList.Count); Remove(InternalList[index]); } - /// + /// + /// Inserts a parameter into the at the specified index. + /// + /// The zero-based index at which to insert the parameter. + /// The parameter to insert. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// public override void Insert(int index, object value) => Insert(index, Cast(value)); /// - /// Removes the specified from the collection. + /// Removes the with the specified name from the collection. /// /// The name of the to remove from the collection. 
public void Remove(string parameterName) { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); var index = IndexOf(parameterName); if (index < 0) - throw new InvalidOperationException("No parameter with the specified name exists in the collection"); + ThrowHelper.ThrowInvalidOperationException("No parameter with the specified name exists in the collection"); RemoveAt(index); } @@ -460,6 +460,10 @@ public void Remove(string parameterName) /// Removes the specified from the collection. /// /// The to remove from the collection. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// public override void Remove(object value) => Remove(Cast(value)); @@ -481,8 +485,7 @@ public override bool Contains(object value) /// public bool TryGetValue(string parameterName, [NotNullWhen(true)] out NpgsqlParameter? parameter) { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); var index = IndexOf(parameterName); @@ -509,11 +512,29 @@ public override void Clear() LookupClear(); } - /// + /// + /// Returns the index of the specified parameter in the . + /// + /// The parameter to find. + /// The index of the parameter if found; otherwise, -1. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// public override int IndexOf(object value) => IndexOf(Cast(value)); - /// + /// + /// Adds a parameter to the . + /// + /// The parameter to add. + /// The zero-based index at which the parameter was added. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// To add a parameter by value, use , , + /// or one of the typed overloads. 
+ /// public override int Add(object value) { Add(Cast(value)); @@ -558,14 +579,19 @@ IEnumerator IEnumerable.GetEnumerator() #endregion - /// + /// + /// Adds the elements of the specified array to the end of the . + /// + /// + /// An array of s to add. Each item must be an instance of . + /// Passing any other type will result in an . + /// public override void AddRange(Array values) { - if (values is null) - throw new ArgumentNullException(nameof(values)); + ArgumentNullException.ThrowIfNull(values); foreach (var parameter in values) - Add(Cast(parameter) ?? throw new ArgumentException("Collection contains a null value.", nameof(values))); + Add(Cast(parameter)); } /// @@ -599,8 +625,7 @@ public int IndexOf(NpgsqlParameter item) /// Parameter to insert. public void Insert(int index, NpgsqlParameter item) { - if (item is null) - throw new ArgumentNullException(nameof(item)); + ArgumentNullException.ThrowIfNull(item); if (item.Collection != null) throw new Exception("The parameter already belongs to a collection"); @@ -624,10 +649,9 @@ public void Insert(int index, NpgsqlParameter item) /// True if the parameter was found and removed, otherwise false. 
public bool Remove(NpgsqlParameter item) { - if (item == null) - throw new ArgumentNullException(nameof(item)); + ArgumentNullException.ThrowIfNull(item); if (item.Collection != this) - throw new InvalidOperationException("The item does not belong to this collection"); + ThrowHelper.ThrowInvalidOperationException("The item does not belong to this collection"); var index = IndexOf(item); if (index >= 0) @@ -664,7 +688,7 @@ internal void CloneTo(NpgsqlParameterCollection other) foreach (var param in InternalList) { var newParam = param.Clone(); - newParam.Collection = this; + newParam.Collection = other; other.InternalList.Add(newParam); } @@ -674,19 +698,20 @@ internal void CloneTo(NpgsqlParameterCollection other) if (TwoPassCompatMode) { Debug.Assert(_caseSensitiveLookup is not null); - other._caseSensitiveLookup = new Dictionary(_caseSensitiveLookup, StringComparer.Ordinal); + other._caseSensitiveLookup = new Dictionary(_caseSensitiveLookup); } } } - internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, CommandType commandType) + internal void ProcessParameters(NpgsqlDataSource.ReloadableState reloadableState, bool validateValues, CommandType commandType) { HasOutputParameters = false; PlaceholderType = PlaceholderType.NoParameters; - for (var i = 0; i < InternalList.Count; i++) + var list = InternalList; + for (var i = 0; i < list.Count; i++) { - var p = InternalList[i]; + var p = list[i]; switch (PlaceholderType) { @@ -704,8 +729,8 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm case PlaceholderType.Mixed: break; default: - throw new ArgumentOutOfRangeException( - nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {PlaceholderType}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {{0}}", PlaceholderType); + break; } switch (p.Direction) @@ -715,13 +740,13 @@ internal void ProcessParameters(TypeMapper typeMapper, bool 
validateValues, Comm case ParameterDirection.InputOutput: if (PlaceholderType == PlaceholderType.Positional && commandType != CommandType.StoredProcedure) - throw new NotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); + ThrowHelper.ThrowNotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); HasOutputParameters = true; break; case ParameterDirection.Output: if (PlaceholderType == PlaceholderType.Positional && commandType != CommandType.StoredProcedure) - throw new NotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); + ThrowHelper.ThrowNotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); HasOutputParameters = true; continue; @@ -730,17 +755,14 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm continue; default: - throw new ArgumentOutOfRangeException(nameof(ParameterDirection), - $"Unhandled {nameof(ParameterDirection)} value: {p.Direction}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(ParameterDirection), + $"Unhandled {nameof(ParameterDirection)} value: {{0}}", p.Direction); + break; } - p.Bind(typeMapper); - + p.ResolveTypeInfo(reloadableState.SerializerOptions, reloadableState.DbTypeResolver, willBind: validateValues); if (validateValues) - { - p.LengthCache?.Clear(); - p.ValidateAndGetLength(); - } + p.Bind(out _, out _); } } @@ -748,10 +770,18 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm internal PlaceholderType PlaceholderType { get; set; } static NpgsqlParameter Cast(object? value) - => value is NpgsqlParameter p - ? 
p - : throw new InvalidCastException( - $"The value \"{value}\" is not of type \"{nameof(NpgsqlParameter)}\" and cannot be used in this parameter collection."); + { + var castedValue = value as NpgsqlParameter; + if (castedValue is null) + ThrowInvalidCastException(value); + + return castedValue; + } + + [DoesNotReturn] + static void ThrowInvalidCastException(object? value) => + throw new InvalidCastException( + $"The value \"{value}\" is not of type \"{nameof(NpgsqlParameter)}\" and cannot be used in this parameter collection."); } enum PlaceholderType diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 0271952431..04a1a214b1 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -1,11 +1,10 @@ -using System; +using System; using System.Data; +using System.Diagnostics; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.TypeMapping; using NpgsqlTypes; -using static Npgsql.Util.Statics; namespace Npgsql; @@ -19,7 +18,18 @@ public sealed class NpgsqlParameter : NpgsqlParameter /// /// Gets or sets the strongly-typed value of the parameter. /// - public T? TypedValue { get; set; } + public T? TypedValue + { + get; + set + { + if (typeof(T) == typeof(object) && ShouldResetObjectTypeInfo(value)) + ResetTypeInfo(); + else + DisposeBindingState(); + field = value; + } + } /// /// Gets or sets the value of the parameter. This delegates to . @@ -30,12 +40,14 @@ public override object? Value set => TypedValue = (T)value!; } + private protected override Type StaticValueType => typeof(T); + #region Constructors /// /// Initializes a new instance of . /// - public NpgsqlParameter() {} + public NpgsqlParameter() { } /// /// Initializes a new instance of with a parameter name and value. 
@@ -66,33 +78,24 @@ public NpgsqlParameter(string parameterName, DbType dbType) #endregion Constructors - internal override void ResolveHandler(TypeMapper typeMapper) - { - if (Handler is not null) - return; - - // TODO: Better exceptions in case of cast failure etc. - if (_npgsqlDbType.HasValue) - Handler = typeMapper.ResolveByNpgsqlDbType(_npgsqlDbType.Value); - else if (_dataTypeName is not null) - Handler = typeMapper.ResolveByDataTypeName(_dataTypeName); - else - Handler = typeMapper.ResolveByValue(TypedValue); - } + private protected override PgConcreteTypeInfo MakeConcreteTypeInfoForTypedValue(PgTypeInfo typeInfo) + => typeInfo.MakeConcreteForValue(TypedValue, out _writeState); + + private protected override PgValueBinding BindTypedValue(PgConcreteTypeInfo typeInfo, DataFormat? formatPreference) + => typeInfo.BindParameterValue(TypedValue, _writeState, formatPreference); - internal override int ValidateAndGetLength() + private protected override ValueTask WriteTypedValue(bool async, PgConcreteTypeInfo typeInfo, PgWriter writer, CancellationToken cancellationToken) { - if (TypedValue is null or DBNull) - return 0; + Debug.Assert(TypedValue is not null); + if (async) + return typeInfo.Converter.WriteAsync(writer, TypedValue, cancellationToken); - var lengthCache = LengthCache; - var len = Handler!.ValidateAndGetLength(TypedValue, ref lengthCache, this); - LengthCache = lengthCache; - return len; + typeInfo.Converter.Write(writer, TypedValue); + return new(); } - internal override Task WriteWithLength(NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - => Handler!.WriteWithLength(TypedValue, buf, LengthCache, this, async, cancellationToken); + private protected override void SetOutputTypedValue(NpgsqlDataReader reader, int ordinal) + => TypedValue = reader.GetFieldValue(ordinal); private protected override NpgsqlParameter CloneCore() => // use fields instead of properties @@ -102,6 +105,7 @@ private protected override 
NpgsqlParameter CloneCore() => _precision = _precision, _scale = _scale, _size = _size, + _dbType = _dbType, _npgsqlDbType = _npgsqlDbType, _dataTypeName = _dataTypeName, Direction = Direction, @@ -113,4 +117,4 @@ private protected override NpgsqlParameter CloneCore() => TypedValue = TypedValue, SourceColumnNullMapping = SourceColumnNullMapping, }; -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index c0ef7989db..981065b813 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.IO; using System.Threading; @@ -6,6 +6,7 @@ using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; +using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; #pragma warning disable 1591 @@ -28,7 +29,7 @@ public sealed class NpgsqlRawCopyStream : Stream, ICancelable NpgsqlWriteBuffer _writeBuf; int _leftToReadInDataMsg; - bool _isDisposed, _isConsumed; + CopyStreamState _state = CopyStreamState.Uninitialized; bool _canRead; bool _canWrite; @@ -42,29 +43,25 @@ public sealed class NpgsqlRawCopyStream : Stream, ICancelable public override int WriteTimeout { get => (int) _writeBuf.Timeout.TotalMilliseconds; - set => _writeBuf.Timeout = TimeSpan.FromMilliseconds(value); + set => _writeBuf.Timeout = value > 0 ? TimeSpan.FromMilliseconds(value) : InfiniteTimeout.InfiniteTimeSpan; } public override int ReadTimeout { get => (int) _readBuf.Timeout.TotalMilliseconds; - set - { - _readBuf.Timeout = TimeSpan.FromMilliseconds(value); - // While calling the connector it will overwrite our read buffer timeout - _connector.UserTimeout = value; - } + set => _readBuf.Timeout = value > 0 ? 
TimeSpan.FromMilliseconds(value) : InfiniteTimeout.InfiniteTimeSpan; } /// /// The copy binary format header signature /// internal static readonly byte[] BinarySignature = - { + [ (byte)'P',(byte)'G',(byte)'C',(byte)'O',(byte)'P',(byte)'Y', (byte)'\n', 255, (byte)'\r', (byte)'\n', 0 - }; + ]; readonly ILogger _copyLogger; + Activity? _activity; #endregion @@ -78,34 +75,54 @@ internal NpgsqlRawCopyStream(NpgsqlConnector connector) _copyLogger = connector.LoggingConfiguration.CopyLogger; } - internal async Task Init(string copyCommand, bool async, CancellationToken cancellationToken = default) + internal async Task Init(string copyCommand, bool async, bool? forExport, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyCommand, async, cancellationToken); - await _connector.Flush(async, cancellationToken); + Debug.Assert(_activity is null); + _activity = _connector.TraceCopyStart(copyCommand, forExport switch + { + true => "COPY TO", + false => "COPY FROM", + null => "COPY", + }); - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + try + { + await _connector.WriteQuery(copyCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var msg = await _connector.ReadMessage(async); - switch (msg.Code) + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) + { + case BackendMessageCode.CopyInResponse: + _state = CopyStreamState.Ready; + var copyInResponse = (CopyInResponseMessage)msg; + IsBinary = copyInResponse.IsBinary; + _canWrite = true; + _writeBuf.StartCopyMode(); + TraceSetImport(); + break; + case BackendMessageCode.CopyOutResponse: + _state = CopyStreamState.Ready; + var copyOutResponse = (CopyOutResponseMessage)msg; + IsBinary = 
copyOutResponse.IsBinary; + _canRead = true; + TraceSetExport(); + break; + case BackendMessageCode.CommandComplete: + throw new InvalidOperationException( + "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + + "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + + "Note that your data has been successfully imported/exported."); + default: + throw _connector.UnexpectedMessageReceived(msg.Code); + } + } + catch (Exception e) { - case BackendMessageCode.CopyInResponse: - var copyInResponse = (CopyInResponseMessage) msg; - IsBinary = copyInResponse.IsBinary; - _canWrite = true; - _writeBuf.StartCopyMode(); - break; - case BackendMessageCode.CopyOutResponse: - var copyOutResponse = (CopyOutResponseMessage) msg; - IsBinary = copyOutResponse.IsBinary; - _canRead = true; - break; - case BackendMessageCode.CommandComplete: - throw new InvalidOperationException( - "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + - "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + - "Note that your data has been successfully imported/exported."); - default: - throw _connector.UnexpectedMessageReceived(msg.Code); + TraceSetException(e); + throw; } } @@ -125,11 +142,7 @@ public override Task WriteAsync(byte[] buffer, int offset, int count, Cancellati return WriteAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if NETSTANDARD2_0 - public void Write(ReadOnlySpan buffer) -#else public override void Write(ReadOnlySpan buffer) -#endif { CheckDisposed(); if (!CanWrite) @@ -143,39 +156,27 @@ public override void Write(ReadOnlySpan buffer) return; } - try - { - // Value is too big, flush. - Flush(); + // Value is too big, flush. 
+ Flush(); - if (buffer.Length <= _writeBuf.WriteSpaceLeft) - { - _writeBuf.WriteBytes(buffer); - return; - } - - // Value is too big even after a flush - bypass the buffer and write directly. - _writeBuf.DirectWrite(buffer); - } - catch (Exception e) + if (buffer.Length <= _writeBuf.WriteSpaceLeft) { - _connector.Break(e); - throw; + _writeBuf.WriteBytes(buffer); + return; } + + // Value is too big even after a flush - bypass the buffer and write directly. + _writeBuf.DirectWrite(buffer); } -#if NETSTANDARD2_0 - public ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) -#else public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) -#endif { CheckDisposed(); if (!CanWrite) throw new InvalidOperationException("Stream not open for writing"); cancellationToken.ThrowIfCancellationRequested(); - using (NoSynchronizationContextScope.Enter()) - return WriteAsyncInternal(buffer, cancellationToken); + + return WriteAsyncInternal(buffer, cancellationToken); async ValueTask WriteAsyncInternal(ReadOnlyMemory buffer, CancellationToken cancellationToken) { @@ -188,36 +189,28 @@ async ValueTask WriteAsyncInternal(ReadOnlyMemory buffer, CancellationToke return; } - try - { - // Value is too big, flush. - await FlushAsync(true, cancellationToken); - - if (buffer.Length <= _writeBuf.WriteSpaceLeft) - { - _writeBuf.WriteBytes(buffer.Span); - return; - } + // Value is too big, flush. + await FlushAsync(true, cancellationToken).ConfigureAwait(false); - // Value is too big even after a flush - bypass the buffer and write directly. - await _writeBuf.DirectWrite(buffer, true, cancellationToken); - } - catch (Exception e) + if (buffer.Length <= _writeBuf.WriteSpaceLeft) { - _connector.Break(e); - throw; + _writeBuf.WriteBytes(buffer.Span); + return; } + + // Value is too big even after a flush - bypass the buffer and write directly. 
+ await _writeBuf.DirectWrite(buffer, true, cancellationToken).ConfigureAwait(false); } } - public override void Flush() => FlushAsync(false).GetAwaiter().GetResult(); + public override void Flush() => FlushAsync(async: false).GetAwaiter().GetResult(); public override Task FlushAsync(CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return FlushAsync(true, cancellationToken); + + return FlushAsync(async: true, cancellationToken); } Task FlushAsync(bool async, CancellationToken cancellationToken = default) @@ -242,11 +235,7 @@ public override Task ReadAsync(byte[] buffer, int offset, int count, Cancel return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if NETSTANDARD2_0 - public int Read(Span span) -#else public override int Read(Span span) -#endif { CheckDisposed(); if (!CanRead) @@ -258,22 +247,18 @@ public override int Read(Span span) return count; } -#if NETSTANDARD2_0 - public ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken) -#else public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken) -#endif { CheckDisposed(); if (!CanRead) throw new InvalidOperationException("Stream not open for reading"); cancellationToken.ThrowIfCancellationRequested(); - using (NoSynchronizationContextScope.Enter()) - return ReadAsyncInternal(); + + return ReadAsyncInternal(); async ValueTask ReadAsyncInternal() { - var count = await ReadCore(buffer.Length, true, cancellationToken); + var count = await ReadCore(buffer.Length, true, cancellationToken).ConfigureAwait(false); if (count > 0) _readBuf.ReadBytes(buffer.Slice(0, count).Span); return count; @@ -282,7 +267,7 @@ async ValueTask ReadAsyncInternal() async ValueTask ReadCore(int count, bool async, CancellationToken cancellationToken = default) { - if (_isConsumed) + if (_state == CopyStreamState.Consumed) return 0; 
using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); @@ -294,12 +279,15 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat { // We've consumed the current DataMessage (or haven't yet received the first), // read the next message - msg = await _connector.ReadMessage(async); + msg = await _connector.ReadMessage(async).ConfigureAwait(false); } - catch + catch (Exception e) { - if (!_isDisposed) + if (_state != CopyStreamState.Disposed) + { + TraceSetException(e); Cleanup(); + } throw; } @@ -309,9 +297,9 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat _leftToReadInDataMsg = ((CopyDataMessage)msg).Length; break; case BackendMessageCode.CopyDone: - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); - _isConsumed = true; + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + _state = CopyStreamState.Consumed; return 0; default: throw _connector.UnexpectedMessageReceived(msg.Code); @@ -323,7 +311,7 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat // If our buffer is empty, read in more. Otherwise return whatever is there, even if the // user asked for more (normal socket behavior) if (_readBuf.ReadBytesLeft == 0) - await _readBuf.ReadMore(async); + await _readBuf.ReadMore(async).ConfigureAwait(false); Debug.Assert(_readBuf.ReadBytesLeft > 0); @@ -342,16 +330,12 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat /// /// Cancels and terminates an ongoing operation. Any data already written will be discarded. /// - public void Cancel() => Cancel(false).GetAwaiter().GetResult(); + public void Cancel() => Cancel(async: false).GetAwaiter().GetResult(); /// /// Cancels and terminates an ongoing operation. 
Any data already written will be discarded. /// - public Task CancelAsync() - { - using (NoSynchronizationContextScope.Enter()) - return Cancel(true); - } + public Task CancelAsync() => Cancel(async: true); async Task Cancel(bool async) { @@ -361,22 +345,29 @@ async Task Cancel(bool async) { _writeBuf.EndCopyMode(); _writeBuf.Clear(); - await _connector.WriteCopyFail(async); - await _connector.Flush(async); + await _connector.WriteCopyFail(async).ConfigureAwait(false); + await _connector.Flush(async).ConfigureAwait(false); try { - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); // The CopyFail should immediately trigger an exception from the read above. throw _connector.Break( new NpgsqlException("Expected ErrorResponse when cancelling COPY but got: " + msg.Code)); } catch (PostgresException e) { - _connector.EndUserAction(); + // TODO: NpgsqlBinaryImporter doesn't cleanup on cancellation + // And instead relies on users disposing the object + // We probably should do the same here Cleanup(); if (e.SqlState != PostgresErrorCodes.QueryCanceled) + { + TraceSetException(e); throw; + } + + TraceStop(); } } else @@ -391,17 +382,12 @@ async Task Cancel(bool async) protected override void Dispose(bool disposing) => DisposeAsync(disposing, false).GetAwaiter().GetResult(); -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else public override ValueTask DisposeAsync() -#endif => DisposeAsync(disposing: true, async: true); - async ValueTask DisposeAsync(bool disposing, bool async) { - if (_isDisposed || !disposing) + if (_state == CopyStreamState.Disposed || !disposing) return; try @@ -410,39 +396,51 @@ async ValueTask DisposeAsync(bool disposing, bool async) if (CanWrite) { - await FlushAsync(async); - _writeBuf.EndCopyMode(); - await _connector.WriteCopyDone(async); - await _connector.Flush(async); - Expect(await _connector.ReadMessage(async), _connector); - Expect(await 
_connector.ReadMessage(async), _connector); + try + { + await FlushAsync(async).ConfigureAwait(false); + _writeBuf.EndCopyMode(); + await _connector.WriteCopyDone(async).ConfigureAwait(false); + await _connector.Flush(async).ConfigureAwait(false); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + TraceStop(); + } + catch (Exception e) + { + TraceSetException(e); + throw; + } } else { - if (!_isConsumed) + try { - try + if (_state != CopyStreamState.Consumed && _state != CopyStreamState.Uninitialized) { if (_leftToReadInDataMsg > 0) { - await _readBuf.Skip(_leftToReadInDataMsg, async); + await _readBuf.Skip(async, _leftToReadInDataMsg).ConfigureAwait(false); } _connector.SkipUntil(BackendMessageCode.ReadyForQuery); } - catch (OperationCanceledException e) when (e.InnerException is PostgresException pg && pg.SqlState == PostgresErrorCodes.QueryCanceled) - { - LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); - } - catch (Exception e) - { - LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); - } + + TraceStop(); + } + catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) + { + LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); + TraceStop(); + } + catch (Exception e) + { + LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); + TraceSetException(e); } } } finally { - _connector.EndUserAction(); Cleanup(); } } @@ -450,20 +448,20 @@ async ValueTask DisposeAsync(bool disposing, bool async) #pragma warning disable CS8625 void Cleanup() { - Debug.Assert(!_isDisposed); + Debug.Assert(_state != CopyStreamState.Disposed); LogMessages.CopyOperationCompleted(_copyLogger, _connector.Id); + _connector.EndUserAction(); _connector.CurrentCopyOperation = null; - 
_connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null; _readBuf = null; _writeBuf = null; - _isDisposed = true; + _state = CopyStreamState.Disposed; } #pragma warning restore CS8625 void CheckDisposed() { - if (_isDisposed) { + if (_state == CopyStreamState.Disposed) { throw new ObjectDisposedException(nameof(NpgsqlRawCopyStream), "The COPY operation has already ended."); } } @@ -474,15 +472,9 @@ void CheckDisposed() public override bool CanSeek => false; - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotSupportedException(); - } + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); - public override void SetLength(long value) - { - throw new NotSupportedException(); - } + public override void SetLength(long value) => throw new NotSupportedException(); public override long Length => throw new NotSupportedException(); @@ -497,16 +489,63 @@ public override long Position #region Input validation static void ValidateArguments(byte[] buffer, int offset, int count) { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentNullException(nameof(offset)); - if (count < 0) - throw new ArgumentNullException(nameof(count)); + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } #endregion + + #region Tracing + + private void TraceSetImport() + { + if (_activity is not null) + { + NpgsqlActivitySource.SetOperation(_activity, 
"COPY FROM"); + } + } + + private void TraceSetExport() + { + if (_activity is not null) + { + NpgsqlActivitySource.SetOperation(_activity, "COPY TO"); + } + } + + private void TraceStop() + { + if (_activity is not null) + { + NpgsqlActivitySource.CopyStop(_activity); + _activity = null; + } + } + + private void TraceSetException(Exception e) + { + if (_activity is not null) + { + NpgsqlActivitySource.SetException(_activity, e); + _activity = null; + } + } + + #endregion + + #region Enums + + enum CopyStreamState + { + Uninitialized, + Ready, + Consumed, + Disposed + } + + #endregion Enums } /// @@ -523,6 +562,20 @@ internal NpgsqlCopyTextWriter(NpgsqlConnector connector, NpgsqlRawCopyStream und throw connector.Break(new Exception("Can't use a binary copy stream for text writing")); } + /// + /// Gets or sets a value, in milliseconds, that determines how long the text writer will attempt to write before timing out. + /// + public int Timeout + { + get => ((NpgsqlRawCopyStream)BaseStream).WriteTimeout; + set + { + var stream = (NpgsqlRawCopyStream)BaseStream; + stream.ReadTimeout = value; + stream.WriteTimeout = value; + } + } + /// /// Cancels and terminates an ongoing import. Any data already written will be discarded. /// @@ -532,19 +585,7 @@ public void Cancel() /// /// Cancels and terminates an ongoing import. Any data already written will be discarded. 
/// - public Task CancelAsync() - { - using (NoSynchronizationContextScope.Enter()) - return ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); - } - -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() - { - Dispose(); - return default; - } -#endif + public Task CancelAsync() => ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); } /// @@ -561,6 +602,20 @@ internal NpgsqlCopyTextReader(NpgsqlConnector connector, NpgsqlRawCopyStream und throw connector.Break(new Exception("Can't use a binary copy stream for text reading")); } + /// + /// Gets or sets a value, in milliseconds, that determines how long the text reader will attempt to read before timing out. + /// + public int Timeout + { + get => ((NpgsqlRawCopyStream)BaseStream).ReadTimeout; + set + { + var stream = (NpgsqlRawCopyStream)BaseStream; + stream.ReadTimeout = value; + stream.WriteTimeout = value; + } + } + /// /// Cancels and terminates an ongoing export. /// @@ -570,15 +625,11 @@ public void Cancel() /// /// Asynchronously cancels and terminates an ongoing export. /// - public Task CancelAsync() - { - using (NoSynchronizationContextScope.Enter()) - return ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); - } + public Task CancelAsync() => ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); public ValueTask DisposeAsync() { Dispose(); return default; } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 75c5e857dc..aea2f6e925 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -1,11 +1,12 @@ using System; +using System.Collections.Generic; using System.Data; using System.Data.Common; using System.Globalization; -using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; +using Npgsql.Internal; using Npgsql.PostgresTypes; using NpgsqlTypes; @@ -16,10 +17,9 @@ namespace Npgsql; /// static class NpgsqlSchema { - public static Task GetSchema(NpgsqlConnection conn, string? collectionName, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) + public static Task GetSchema(bool async, NpgsqlConnection conn, string? collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) { - if (collectionName is null) - throw new ArgumentNullException(nameof(collectionName)); + ArgumentNullException.ThrowIfNull(collectionName); if (collectionName.Length == 0) throw new ArgumentException("Collection name cannot be empty.", nameof(collectionName)); @@ -36,6 +36,7 @@ public static Task GetSchema(NpgsqlConnection conn, string? collectio "TABLES" => GetTables(conn, restrictions, async, cancellationToken), "COLUMNS" => GetColumns(conn, restrictions, async, cancellationToken), "VIEWS" => GetViews(conn, restrictions, async, cancellationToken), + "MATERIALIZEDVIEWS" => GetMaterializedViews(conn, restrictions, async, cancellationToken), "USERS" => GetUsers(conn, restrictions, async, cancellationToken), "INDEXES" => GetIndexes(conn, restrictions, async, cancellationToken), "INDEXCOLUMNS" => GetIndexColumns(conn, restrictions, async, cancellationToken), @@ -124,7 +125,7 @@ static NpgsqlCommand BuildCommand(NpgsqlConnection conn, StringBuilder query, st { for (var i = 0; i < restrictions.Length && i < names.Length; ++i) { - if (restrictions[i] is string restriction && restriction.Length != 0) + if (restrictions[i] is { Length: > 0 } restriction) { if (addWhere) { @@ -153,174 +154,314 @@ static NpgsqlCommand BuildCommand(NpgsqlConnection conn, StringBuilder query, st static string RemoveSpecialChars(string paramName) => paramName.Replace("(", "").Replace(")", "").Replace(".", ""); - static async Task GetDatabases(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) - { - var databases = new DataTable("Databases") { Locale = CultureInfo.InvariantCulture }; - - databases.Columns.AddRange(new[] { - new DataColumn("database_name"), - new DataColumn("owner"), - new DataColumn("encoding") - }); - var getDatabases = new StringBuilder(); + static Task GetDatabases(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + { + var dataTable = new DataTable("Databases") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("database_name"), + new DataColumn("owner"), + new DataColumn("encoding") + } + }; - getDatabases.Append("SELECT d.datname AS database_name, u.usename AS owner, pg_catalog.pg_encoding_to_char(d.encoding) AS encoding FROM pg_catalog.pg_database d LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid"); + var sql = new StringBuilder(); - using var command = BuildCommand(conn, getDatabases, restrictions, "datname"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(databases, async, cancellationToken); + sql.Append( + """ +SELECT d.datname, u.usename, pg_catalog.pg_encoding_to_char(d.encoding) +FROM pg_catalog.pg_database d +LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid +"""); - return databases; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "datname"), + dataTable, + (reader, row) => + { + row["database_name"] = GetFieldValueOrDBNull(reader, 0); + row["owner"] = GetFieldValueOrDBNull(reader, 1); + row["encoding"] = GetFieldValueOrDBNull(reader, 2); + }, cancellationToken); } - static async Task GetSchemata(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetSchemata(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var schemata = new DataTable("Schemata") { Locale = CultureInfo.InvariantCulture }; - - schemata.Columns.AddRange(new[] { - new DataColumn("catalog_name"), - new DataColumn("schema_name"), - new DataColumn("schema_owner") - }); + var dataTable = new DataTable("Schemata") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("catalog_name"), + new DataColumn("schema_name"), + new DataColumn("schema_owner") + } + }; - var getSchemata = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT * FROM ( - SELECT current_database() AS catalog_name, - nspname AS schema_name, - r.rolname AS schema_owner - FROM - pg_catalog.pg_namespace LEFT JOIN pg_catalog.pg_roles r ON r.oid = nspowner - ) tmp"); - - using var command = BuildCommand(conn, getSchemata, restrictions, "catalog_name", "schema_name", "schema_owner"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(schemata, async, cancellationToken); - - return schemata; + SELECT current_database(), nspname, r.rolname + FROM pg_catalog.pg_namespace + LEFT JOIN pg_catalog.pg_roles r ON r.oid = nspowner +) tmp +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "catalog_name", "schema_name", "schema_owner"), + dataTable, + (reader, row) => + { + row["catalog_name"] = GetFieldValueOrDBNull(reader, 0); + row["schema_name"] = GetFieldValueOrDBNull(reader, 1); + row["schema_owner"] = GetFieldValueOrDBNull(reader, 2); + }, cancellationToken); } - - static async Task GetTables(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetTables(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var tables = new DataTable("Tables") { Locale = CultureInfo.InvariantCulture }; - - tables.Columns.AddRange(new[] { - new DataColumn("table_catalog"), - new DataColumn("table_schema"), - new DataColumn("table_name"), - new DataColumn("table_type") - }); + var dataTable = new DataTable("Tables") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("table_type") + } + }; - var getTables = new StringBuilder(); + var sql = new StringBuilder(); - getTables.Append(@" + sql.Append( + """ SELECT table_catalog, table_schema, table_name, table_type FROM information_schema.tables WHERE table_type IN ('BASE TABLE', 'FOREIGN', 'FOREIGN TABLE') AND - table_schema NOT IN ('pg_catalog', 'information_schema')"); - - using var command = BuildCommand(conn, getTables, restrictions, false, "table_catalog", "table_schema", "table_name", "table_type"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(tables, async, cancellationToken); - - return tables; + table_schema NOT IN ('pg_catalog', 'information_schema') +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "table_catalog", "table_schema", "table_name", "table_type"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_type"] = GetFieldValueOrDBNull(reader, 3); + }, cancellationToken); } - static async Task GetColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetColumns(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var columns = new DataTable("Columns") { Locale = CultureInfo.InvariantCulture }; - - columns.Columns.AddRange(new DataColumn[] { - new("table_catalog"), new("table_schema"), new("table_name"), new("column_name"), - new("ordinal_position", typeof(int)), - new("column_default"), - new("is_nullable"), - new("data_type"), - new("character_maximum_length", typeof(int)), new("character_octet_length", typeof(int)), - new("numeric_precision", typeof(int)), new("numeric_precision_radix", typeof(int)), new("numeric_scale", typeof(int)), - new("datetime_precision", typeof(int)), - new("character_set_catalog"), new("character_set_schema"), new("character_set_name"), - new("collation_catalog") - }); - - var getColumns = new StringBuilder(@" + var dataTable = new DataTable("Columns") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("column_name"), + new DataColumn("ordinal_position", typeof(int)), + new DataColumn("column_default"), + new DataColumn("is_nullable"), + new DataColumn("data_type"), + new DataColumn("character_maximum_length", typeof(int)), + new DataColumn("character_octet_length", typeof(int)), + new DataColumn("numeric_precision", typeof(int)), + new DataColumn("numeric_precision_radix", typeof(int)), + new DataColumn("numeric_scale", typeof(int)), + new DataColumn("datetime_precision", typeof(int)), + new DataColumn("character_set_catalog"), + new DataColumn("character_set_schema"), + new DataColumn("character_set_name"), + new DataColumn("collation_catalog") + } + }; + + var sql = new StringBuilder( + """ SELECT - table_catalog, table_schema, table_name, column_name, + table_catalog, + table_schema, + table_name, + column_name, ordinal_position, column_default, is_nullable, - CASE WHEN udt_schema is NULL THEN udt_name ELSE format_type(typ.oid, NULL) END AS 
data_type, - character_maximum_length, character_octet_length, - numeric_precision, numeric_precision_radix, numeric_scale, + CASE WHEN udt_schema is NULL THEN udt_name ELSE format_type(typ.oid, NULL) END, + character_maximum_length, + character_octet_length, + numeric_precision, + numeric_precision_radix, + numeric_scale, datetime_precision, - character_set_catalog, character_set_schema, character_set_name, + character_set_catalog, + character_set_schema, + character_set_name, collation_catalog FROM information_schema.columns JOIN pg_namespace AS ns ON ns.nspname = udt_schema -JOIN pg_type AS typ ON typnamespace = ns.oid AND typname = udt_name"); - - using var command = BuildCommand(conn, getColumns, restrictions, "table_catalog", "table_schema", "table_name", "column_name"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(columns, async, cancellationToken); - - return columns; +JOIN pg_type AS typ ON typnamespace = ns.oid AND typname = udt_name +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "table_catalog", "table_schema", "table_name", "column_name"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["column_name"] = GetFieldValueOrDBNull(reader, 3); + row["ordinal_position"] = GetFieldValueOrDBNull(reader, 4); + row["column_default"] = GetFieldValueOrDBNull(reader, 5); + row["is_nullable"] = GetFieldValueOrDBNull(reader, 6); + row["data_type"] = GetFieldValueOrDBNull(reader, 7); + row["character_maximum_length"] = GetFieldValueOrDBNull(reader, 8); + row["character_octet_length"] = GetFieldValueOrDBNull(reader, 9); + row["numeric_precision"] = GetFieldValueOrDBNull(reader, 10); + row["numeric_precision_radix"] = GetFieldValueOrDBNull(reader, 11); + row["numeric_scale"] = GetFieldValueOrDBNull(reader, 12); + row["datetime_precision"] = 
GetFieldValueOrDBNull(reader, 13); + row["character_set_catalog"] = GetFieldValueOrDBNull(reader, 14); + row["character_set_schema"] = GetFieldValueOrDBNull(reader, 15); + row["character_set_name"] = GetFieldValueOrDBNull(reader, 16); + row["collation_catalog"] = GetFieldValueOrDBNull(reader, 17); + }, cancellationToken); } - static async Task GetViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var views = new DataTable("Views") { Locale = CultureInfo.InvariantCulture }; - - views.Columns.AddRange(new[] { - new DataColumn("table_catalog"), new DataColumn("table_schema"), new DataColumn("table_name"), - new DataColumn("check_option"), new DataColumn("is_updatable") - }); + var dataTable = new DataTable("Views") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("check_option"), + new DataColumn("is_updatable") + } + }; - var getViews = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT table_catalog, table_schema, table_name, check_option, is_updatable FROM information_schema.views -WHERE table_schema NOT IN ('pg_catalog', 'information_schema')"); - - using var command = BuildCommand(conn, getViews, restrictions, false, "table_catalog", "table_schema", "table_name"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(views, async, cancellationToken); - - return views; +WHERE table_schema NOT IN ('pg_catalog', 'information_schema') +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "table_catalog", "table_schema", "table_name"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = 
GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["check_option"] = GetFieldValueOrDBNull(reader, 3); + row["is_updatable"] = GetFieldValueOrDBNull(reader, 3); + }, cancellationToken); } - static async Task GetUsers(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetMaterializedViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var users = new DataTable("Users") { Locale = CultureInfo.InvariantCulture }; + var dataTable = new DataTable("MaterializedViews") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("table_owner"), + new DataColumn("has_indexes", typeof(bool)), + new DataColumn("is_populated", typeof(bool)) + } + }; - users.Columns.AddRange(new[] { new DataColumn("user_name"), new DataColumn("user_sysid", typeof(int)) }); + var sql = new StringBuilder(); - var getUsers = new StringBuilder(); + sql.Append("""SELECT current_database(), schemaname, matviewname, matviewowner, hasindexes, ispopulated FROM pg_catalog.pg_matviews"""); - getUsers.Append("SELECT usename as user_name, usesysid as user_sysid FROM pg_catalog.pg_user"); + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "current_database()", "schemaname", "matviewname", "matviewowner"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_owner"] = GetFieldValueOrDBNull(reader, 3); + row["has_indexes"] = GetFieldValueOrDBNull(reader, 4); + row["is_populated"] = GetFieldValueOrDBNull(reader, 5); + }, cancellationToken); + } - using var command = BuildCommand(conn, getUsers, restrictions, "usename"); - using var 
adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(users, async, cancellationToken); + static Task GetUsers(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + { + var dataTable = new DataTable("Users") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("user_name"), + new DataColumn("user_sysid", typeof(uint)) + } + }; - return users; + var sql = new StringBuilder(); + + sql.Append("SELECT usename, usesysid FROM pg_catalog.pg_user"); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "usename"), + dataTable, + (reader, row) => + { + row["user_name"] = GetFieldValueOrDBNull(reader, 0); + row["user_sysid"] = GetFieldValueOrDBNull(reader, 1); + }, cancellationToken); } - static async Task GetIndexes(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetIndexes(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var indexes = new DataTable("Indexes") { Locale = CultureInfo.InvariantCulture }; - - indexes.Columns.AddRange(new[] { - new DataColumn("table_catalog"), new DataColumn("table_schema"), new DataColumn("table_name"), - new DataColumn("index_name"), new DataColumn("type_desc") - }); - - var getIndexes = new StringBuilder(@" -SELECT current_database() AS table_catalog, - n.nspname AS table_schema, - t.relname AS table_name, - i.relname AS index_name, - '' AS type_desc + var dataTable = new DataTable("Indexes") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("index_name"), + new DataColumn("type_desc") + } + }; + + var sql = new StringBuilder( + """ +SELECT current_database(), + n.nspname, + t.relname, + i.relname, + '' FROM pg_catalog.pg_class i JOIN pg_catalog.pg_index ix ON ix.indexrelid = 
i.oid @@ -330,35 +471,52 @@ pg_catalog.pg_class i WHERE i.relkind = 'i' AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND - t.relkind = 'r'"); - - using var command = BuildCommand(conn, getIndexes, restrictions, false, "current_database()", "n.nspname", "t.relname", "i.relname"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(indexes, async, cancellationToken); - - return indexes; + t.relkind = 'r' +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "n.nspname", "t.relname", "i.relname"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["index_name"] = GetFieldValueOrDBNull(reader, 3); + row["type_desc"] = GetFieldValueOrDBNull(reader, 4); + }, cancellationToken); } - static async Task GetIndexColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetIndexColumns(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var indexColumns = new DataTable("IndexColumns") { Locale = CultureInfo.InvariantCulture }; - - indexColumns.Columns.AddRange(new[] { - new DataColumn("constraint_catalog"), new DataColumn("constraint_schema"), new DataColumn("constraint_name"), - new DataColumn("table_catalog"), new DataColumn("table_schema"), new DataColumn("table_name"), - new DataColumn("column_name"), new DataColumn("index_name") - }); + var dataTable = new DataTable("IndexColumns") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("constraint_catalog"), + new DataColumn("constraint_schema"), + new DataColumn("constraint_name"), + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("column_name"), + new DataColumn("index_name") + } + }; - var getIndexColumns = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT - current_database() AS constraint_catalog, - t_ns.nspname AS constraint_schema, - ix_cls.relname AS constraint_name, - current_database() AS table_catalog, - ix_ns.nspname AS table_schema, - t.relname AS table_name, - a.attname AS column_name, - ix_cls.relname AS index_name + current_database(), + t_ns.nspname, + ix_cls.relname, + current_database(), + ix_ns.nspname, + t.relname, + a.attname, + ix_cls.relname FROM pg_class t JOIN pg_index ix ON t.oid = ix.indrelid @@ -370,69 +528,117 @@ pg_class t ix_cls.relkind = 'i' AND t_ns.nspname NOT IN ('pg_catalog', 'pg_toast') AND a.attnum = ANY(ix.indkey) AND - t.relkind = 'r'"); - - using var command = BuildCommand(conn, getIndexColumns, restrictions, false, "current_database()", "t_ns.nspname", "t.relname", "ix_cls.relname", "a.attname"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(indexColumns, async, cancellationToken); - - return indexColumns; + t.relkind = 'r' +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, 
restrictions, false, "current_database()", "t_ns.nspname", "t.relname", "ix_cls.relname", "a.attname"), + dataTable, + (reader, row) => + { + row["constraint_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["constraint_schema"] = GetFieldValueOrDBNull(reader, 1); + row["constraint_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_catalog"] = GetFieldValueOrDBNull(reader, 3); + row["table_schema"] = GetFieldValueOrDBNull(reader, 4); + row["table_name"] = GetFieldValueOrDBNull(reader, 5); + row["column_name"] = GetFieldValueOrDBNull(reader, 6); + row["index_name"] = GetFieldValueOrDBNull(reader, 7); + }, cancellationToken); } - static async Task GetConstraints(NpgsqlConnection conn, string?[]? restrictions, string? constraintType, bool async, CancellationToken cancellationToken = default) + static Task GetConstraints(NpgsqlConnection conn, string?[]? restrictions, string? constraintType, bool async, CancellationToken cancellationToken = default) { - var getConstraints = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT - current_database() AS ""CONSTRAINT_CATALOG"", - pgn.nspname AS ""CONSTRAINT_SCHEMA"", - pgc.conname AS ""CONSTRAINT_NAME"", - current_database() AS ""TABLE_CATALOG"", - pgtn.nspname AS ""TABLE_SCHEMA"", - pgt.relname AS ""TABLE_NAME"", - ""CONSTRAINT_TYPE"", - pgc.condeferrable AS ""IS_DEFERRABLE"", - pgc.condeferred AS ""INITIALLY_DEFERRED"" + current_database(), + pgn.nspname, + pgc.conname, + current_database(), + pgtn.nspname, + pgt.relname, + constraint_type, + pgc.condeferrable, + pgc.condeferred FROM pg_catalog.pg_constraint pgc JOIN pg_catalog.pg_namespace pgn ON pgc.connamespace = pgn.oid JOIN pg_catalog.pg_class pgt ON pgc.conrelid = pgt.oid JOIN pg_catalog.pg_namespace pgtn ON pgt.relnamespace = pgtn.oid JOIN ( - SELECT 'PRIMARY KEY' AS ""CONSTRAINT_TYPE"", 'p' AS ""contype"" + SELECT 'PRIMARY KEY' AS constraint_type, 'p' AS contype UNION ALL - SELECT 'FOREIGN KEY' AS ""CONSTRAINT_TYPE"", 'f' AS ""contype"" + SELECT 
'FOREIGN KEY' AS constraint_type, 'f' AS contype UNION ALL - SELECT 'UNIQUE KEY' AS ""CONSTRAINT_TYPE"", 'u' AS ""contype"" -) mapping_table ON mapping_table.contype = pgc.contype"); - if ("ForeignKeys".Equals(constraintType)) - getConstraints.Append(" and pgc.contype='f'"); - else if ("PrimaryKey".Equals(constraintType)) - getConstraints.Append(" and pgc.contype='p'"); - else if ("UniqueKeys".Equals(constraintType)) - getConstraints.Append(" and pgc.contype='u'"); - else - constraintType = "Constraints"; + SELECT 'UNIQUE KEY' AS constraint_type, 'u' AS contype +) mapping_table ON mapping_table.contype = pgc.contype +"""); - using var command = BuildCommand(conn, getConstraints, restrictions, false, "current_database()", "pgtn.nspname", "pgt.relname", "pgc.conname"); - using var adapter = new NpgsqlDataAdapter(command); - var table = new DataTable(constraintType) { Locale = CultureInfo.InvariantCulture }; + switch (constraintType) + { + case "ForeignKeys": + sql.Append(" and pgc.contype='f'"); + break; + case "PrimaryKey": + sql.Append(" and pgc.contype='p'"); + break; + case "UniqueKeys": + sql.Append(" and pgc.contype='u'"); + break; + default: + constraintType = "Constraints"; + break; + } - await adapter.Fill(table, async, cancellationToken); + var dataTable = new DataTable(constraintType) + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("CONSTRAINT_CATALOG"), + new DataColumn("CONSTRAINT_SCHEMA"), + new DataColumn("CONSTRAINT_NAME"), + new DataColumn("TABLE_CATALOG"), + new DataColumn("TABLE_SCHEMA"), + new DataColumn("TABLE_NAME"), + new DataColumn("CONSTRAINT_TYPE"), + new DataColumn("IS_DEFERRABLE", typeof(bool)), + new DataColumn("INITIALLY_DEFERRED", typeof(bool)) + } + }; - return table; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "pgtn.nspname", "pgt.relname", "pgc.conname"), + dataTable, + (reader, row) => + { + row["CONSTRAINT_CATALOG"] = 
GetFieldValueOrDBNull(reader, 0); + row["CONSTRAINT_SCHEMA"] = GetFieldValueOrDBNull(reader, 1); + row["CONSTRAINT_NAME"] = GetFieldValueOrDBNull(reader, 2); + row["TABLE_CATALOG"] = GetFieldValueOrDBNull(reader, 3); + row["TABLE_SCHEMA"] = GetFieldValueOrDBNull(reader, 4); + row["TABLE_NAME"] = GetFieldValueOrDBNull(reader, 5); + row["CONSTRAINT_TYPE"] = GetFieldValueOrDBNull(reader, 6); + row["IS_DEFERRABLE"] = GetFieldValueOrDBNull(reader, 7); + row["INITIALLY_DEFERRED"] = GetFieldValueOrDBNull(reader, 8); + }, cancellationToken); } - static async Task GetConstraintColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetConstraintColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var getConstraintColumns = new StringBuilder(@" -SELECT current_database() AS constraint_catalog, - n.nspname AS constraint_schema, - c.conname AS constraint_name, - current_database() AS table_catalog, - n.nspname AS table_schema, - t.relname AS table_name, - a.attname AS column_name, - a.attnum AS ordinal_number, + var sql = new StringBuilder( + """ +SELECT current_database(), + n.nspname, + c.conname, + current_database(), + n.nspname, + t.relname, + a.attname, + a.attnum, mapping_table.constraint_type FROM pg_constraint c JOIN pg_namespace n on n.oid = c.connamespace @@ -446,15 +652,42 @@ UNION ALL SELECT 'UNIQUE KEY' AS constraint_type, 'u' AS contype ) mapping_table ON mapping_table.contype = c.contype - AND n.nspname NOT IN ('pg_catalog', 'pg_toast')"); + AND n.nspname NOT IN ('pg_catalog', 'pg_toast') +"""); - using var command = BuildCommand(conn, getConstraintColumns, restrictions, false, "current_database()", "n.nspname", "t.relname", "c.conname", "a.attname"); - using var adapter = new NpgsqlDataAdapter(command); - var table = new DataTable("ConstraintColumns") { Locale = CultureInfo.InvariantCulture }; - - await adapter.Fill(table, 
async, cancellationToken); + var dataTable = new DataTable("ConstraintColumns") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("constraint_catalog"), + new DataColumn("constraint_schema"), + new DataColumn("constraint_name"), + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("column_name"), + new DataColumn("ordinal_number", typeof(int)), + new DataColumn("constraint_type") + } + }; - return table; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "n.nspname", "t.relname", "c.conname", "a.attname"), + dataTable, + (reader, row) => + { + row["constraint_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["constraint_schema"] = GetFieldValueOrDBNull(reader, 1); + row["constraint_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_catalog"] = GetFieldValueOrDBNull(reader, 3); + row["table_schema"] = GetFieldValueOrDBNull(reader, 4); + row["table_name"] = GetFieldValueOrDBNull(reader, 5); + row["column_name"] = GetFieldValueOrDBNull(reader, 6); + row["ordinal_number"] = GetFieldValueOrDBNull(reader, 7); + row["constraint_type"] = GetFieldValueOrDBNull(reader, 8); + }, cancellationToken); } static DataTable GetDataSourceInformation(NpgsqlConnection conn) @@ -525,7 +758,8 @@ static DataTable GetDataSourceInformation(NpgsqlConnection conn) static DataTable GetDataTypes(NpgsqlConnection conn) { - using var _ = conn.StartTemporaryBindingScope(out var connector); + conn.CheckReady(); + var connector = conn.Connector!; var table = new DataTable("DataTypes"); @@ -557,105 +791,112 @@ static DataTable GetDataTypes(NpgsqlConnection conn) table.Columns.Add("OID", typeof(uint)); // TODO: Support type name restriction - - foreach (var baseType in connector.DatabaseInfo.BaseTypes.Cast() - .Concat(connector.DatabaseInfo.EnumTypes) - .Concat(connector.DatabaseInfo.CompositeTypes)) + try { - if 
(!connector.TypeMapper.TryGetMapping(baseType, out var mapping)) - continue; + var serializerOptions = connector.SerializerOptions; + PgSerializerOptions.IntrospectionCaller = true; + + var types = new List(); + types.AddRange(connector.DatabaseInfo.BaseTypes); + types.AddRange(connector.DatabaseInfo.EnumTypes); + types.AddRange(connector.DatabaseInfo.CompositeTypes); + foreach (var baseType in types) + { + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(baseType)) is not { } info) + continue; - var row = table.Rows.Add(); + var row = table.Rows.Add(); - PopulateDefaultDataTypeInfo(row, baseType); - PopulateHardcodedDataTypeInfo(row, baseType); + PopulateDefaultDataTypeInfo(row, baseType); + PopulateHardcodedDataTypeInfo(row, baseType); - if (mapping.ClrTypes.Length > 0) - row["DataType"] = mapping.ClrTypes[0].FullName; - if (mapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)mapping.NpgsqlDbType.Value; - } + row["DataType"] = info.Type.FullName; + if (baseType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } - foreach (var arrayType in connector.DatabaseInfo.ArrayTypes) - { - if (!connector.TypeMapper.TryGetMapping(arrayType.Element, out var elementMapping)) - continue; - - var row = table.Rows.Add(); - - PopulateDefaultDataTypeInfo(row, arrayType.Element); - // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). 
- PopulateHardcodedDataTypeInfo(row, arrayType.Element); - - row["TypeName"] = arrayType.DisplayName; - row["OID"] = arrayType.OID; - row["CreateFormat"] += "[]"; - if (elementMapping.ClrTypes.Length > 0) - row["DataType"] = elementMapping.ClrTypes[0].MakeArrayType().FullName; - if (elementMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)(elementMapping.NpgsqlDbType.Value | NpgsqlDbType.Array); - } + foreach (var arrayType in connector.DatabaseInfo.ArrayTypes) + { + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(arrayType)) is not { } info) + continue; - foreach (var rangeType in connector.DatabaseInfo.RangeTypes) - { - if (!connector.TypeMapper.TryGetMapping(rangeType.Subtype, out var subtypeMapping)) - continue; - - var row = table.Rows.Add(); - - PopulateDefaultDataTypeInfo(row, rangeType.Subtype); - // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). - PopulateHardcodedDataTypeInfo(row, rangeType.Subtype); - - row["TypeName"] = rangeType.DisplayName; - row["OID"] = rangeType.OID; - row["CreateFormat"] = rangeType.DisplayName.ToUpperInvariant(); - if (subtypeMapping.ClrTypes.Length > 0) - row["DataType"] = typeof(NpgsqlRange<>).MakeGenericType(subtypeMapping.ClrTypes[0]).FullName; - if (subtypeMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)(subtypeMapping.NpgsqlDbType.Value | NpgsqlDbType.Range); - } + var row = table.Rows.Add(); - foreach (var multirangeType in connector.DatabaseInfo.MultirangeTypes) - { - var subtypeType = multirangeType.Subrange.Subtype; - if (!connector.TypeMapper.TryGetMapping(subtypeType, out var subtypeMapping)) - continue; - - var row = table.Rows.Add(); - - PopulateDefaultDataTypeInfo(row, subtypeType); - // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). 
- PopulateHardcodedDataTypeInfo(row, subtypeType); - - row["TypeName"] = multirangeType.DisplayName; - row["OID"] = multirangeType.OID; - row["CreateFormat"] = multirangeType.DisplayName.ToUpperInvariant(); - if (subtypeMapping.ClrTypes.Length > 0) - row["DataType"] = typeof(NpgsqlRange<>).MakeGenericType(subtypeMapping.ClrTypes[0]).FullName; - if (subtypeMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)(subtypeMapping.NpgsqlDbType.Value | NpgsqlDbType.Range); - } + PopulateDefaultDataTypeInfo(row, arrayType.Element); + // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). + PopulateHardcodedDataTypeInfo(row, arrayType.Element); - foreach (var domainType in connector.DatabaseInfo.DomainTypes) - { - if (!connector.TypeMapper.TryGetMapping(domainType, out var baseMapping)) - continue; + row["TypeName"] = arrayType.DisplayName; + row["OID"] = arrayType.OID; + row["CreateFormat"] += "[]"; + row["DataType"] = info.Type.FullName; + if (arrayType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } - var row = table.Rows.Add(); + foreach (var rangeType in connector.DatabaseInfo.RangeTypes) + { + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(rangeType)) is not { } info) + continue; - PopulateDefaultDataTypeInfo(row, domainType.BaseType); - // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). - PopulateHardcodedDataTypeInfo(row, domainType.BaseType); - row["TypeName"] = domainType.DisplayName; - row["OID"] = domainType.OID; - // A domain is never the best match, since its underlying base type is - row["IsBestMatch"] = false; + var row = table.Rows.Add(); + + PopulateDefaultDataTypeInfo(row, rangeType.Subtype); + // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). 
+ PopulateHardcodedDataTypeInfo(row, rangeType.Subtype); + + row["TypeName"] = rangeType.DisplayName; + row["OID"] = rangeType.OID; + row["CreateFormat"] = rangeType.DisplayName.ToUpperInvariant(); + row["DataType"] = info.Type.FullName; + if (rangeType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } - if (baseMapping.ClrTypes.Length > 0) - row["DataType"] = baseMapping.ClrTypes[0].FullName; - if (baseMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)baseMapping.NpgsqlDbType.Value; + foreach (var multirangeType in connector.DatabaseInfo.MultirangeTypes) + { + var subtypeType = multirangeType.Subrange.Subtype; + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(multirangeType)) is not { } info) + continue; + + var row = table.Rows.Add(); + + PopulateDefaultDataTypeInfo(row, subtypeType); + // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). + PopulateHardcodedDataTypeInfo(row, subtypeType); + + row["TypeName"] = multirangeType.DisplayName; + row["OID"] = multirangeType.OID; + row["CreateFormat"] = multirangeType.DisplayName.ToUpperInvariant(); + row["DataType"] = info.Type.FullName; + if (multirangeType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } + + foreach (var domainType in connector.DatabaseInfo.DomainTypes) + { + var representationalType = domainType.GetRepresentationalType(); + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(representationalType)) is not { } info) + continue; + + var row = table.Rows.Add(); + + PopulateDefaultDataTypeInfo(row, representationalType); + // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). 
+ PopulateHardcodedDataTypeInfo(row, representationalType); + row["TypeName"] = domainType.DisplayName; + row["OID"] = domainType.OID; + // A domain is never the best match, since its underlying base type is + row["IsBestMatch"] = false; + + row["DataType"] = info.Type.FullName; + if (representationalType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } + } + finally + { + PgSerializerOptions.IntrospectionCaller = false; } return table; @@ -765,7 +1006,7 @@ static DataTable GetReservedWords() /// List of keywords taken from PostgreSQL 9.0 reserved words documentation. /// static readonly string[] ReservedKeywords = - { + [ "ALL", "ANALYSE", "ANALYZE", @@ -865,7 +1106,44 @@ static DataTable GetReservedWords() "WHERE", "WINDOW", "WITH" - }; + ]; #endregion Reserved Keywords + + static async Task ParseResults(bool async, NpgsqlCommand command, DataTable dataTable, Action populateRow, CancellationToken cancellationToken) + { + NpgsqlDataReader? reader = null; + try + { + reader = async + ? await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false) + : command.ExecuteReader(); + + dataTable.BeginLoadData(); + + while (async ? await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read()) + populateRow(reader, dataTable.Rows.Add()); + + return dataTable; + } + finally + { + dataTable.EndLoadData(); + + if (async) + { + if (reader is not null) + await reader.DisposeAsync().ConfigureAwait(false); + await command.DisposeAsync().ConfigureAwait(false); + } + else + { + reader?.Dispose(); + command.Dispose(); + } + } + } + + static object GetFieldValueOrDBNull(NpgsqlDataReader reader, int ordinal) + => reader.IsDBNull(ordinal) ? 
DBNull.Value : reader.GetFieldValue(ordinal)!; } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs new file mode 100644 index 0000000000..a9f7a4717e --- /dev/null +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -0,0 +1,966 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Net.Security; +using System.Security.Cryptography.X509Certificates; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Npgsql.Internal; +using Npgsql.Internal.ResolverFactories; +using Npgsql.NameTranslation; +using Npgsql.Properties; +using Npgsql.TypeMapping; +using NpgsqlTypes; + +namespace Npgsql; + +/// +/// Provides a simple API for configuring and creating an , from which database connections can be obtained. +/// +/// +/// On this builder, various features are disabled by default; unless you're looking to save on code size (e.g. when publishing with +/// NativeAOT), use instead. +/// +public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper +{ + static UnsupportedTypeInfoResolver UnsupportedTypeInfoResolver { get; } = new(); + + ILoggerFactory? _loggerFactory; + bool _sensitiveDataLoggingEnabled; + List>? _tracingOptionsBuilderCallbacks; + List>? _typeLoadingOptionsBuilderCallbacks; + + TransportSecurityHandler _transportSecurityHandler = new(); + RemoteCertificateValidationCallback? _userCertificateValidationCallback; + Action? _clientCertificatesCallback; + Action? _sslClientAuthenticationOptionsCallback; + + Action? _negotiateOptionsCallback; + + IntegratedSecurityHandler _integratedSecurityHandler = new(); + + Func? _passwordProvider; + Func>? _passwordProviderAsync; + + Func>? _periodicPasswordProvider; + TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; + + List? 
_dbTypeResolverFactories; + PgTypeInfoResolverChainBuilder _resolverChainBuilder = new(); // mutable struct, don't make readonly. + + readonly UserTypeMapper _userTypeMapper; + + Action? _connectionInitializer; + Func? _connectionInitializerAsync; + + internal JsonSerializerOptions? JsonSerializerOptions { get; private set; } + + internal Action ConfigureDefaultFactories { get; set; } + + /// + /// A connection string builder that can be used to configure the connection string on the builder. + /// + public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } + + /// + /// Returns the connection string, as currently configured on the builder. + /// + public string ConnectionString => ConnectionStringBuilder.ToString(); + + static NpgsqlSlimDataSourceBuilder() + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers([new AdoTypeInfoResolverFactory()]); + + /// + /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. + /// + public string? Name { get; set; } + + /// + /// Constructs a new , optionally starting out from the given + /// . + /// + public NpgsqlSlimDataSourceBuilder(string? connectionString = null) + : this(new NpgsqlConnectionStringBuilder(connectionString)) + {} + + internal NpgsqlSlimDataSourceBuilder(NpgsqlConnectionStringBuilder connectionStringBuilder) + { + ConnectionStringBuilder = connectionStringBuilder; + _userTypeMapper = new() { DefaultNameTranslator = GlobalTypeMapper.Instance.DefaultNameTranslator }; + ConfigureDefaultFactories = static instance => instance.AppendDefaultFactories(); + ConfigureResolverChain = static chain => chain.Add(UnsupportedTypeInfoResolver); + } + + /// + /// Sets the that will be used for logging. + /// + /// The logger factory to be used. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseLoggerFactory(ILoggerFactory? 
loggerFactory) + { + _loggerFactory = loggerFactory; + return this; + } + + /// + /// Enables parameters to be included in logging. This includes potentially sensitive information from data sent to PostgreSQL. + /// You should only enable this flag in development, or if you have the appropriate security measures in place based on the + /// sensitivity of this data. + /// + /// If , then sensitive data is logged. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabled = true) + { + _sensitiveDataLoggingEnabled = parameterLoggingEnabled; + return this; + } + + /// + /// Configure type loading options for the DataSource. Calling this again will replace + /// the prior action. + /// + public NpgsqlSlimDataSourceBuilder ConfigureTypeLoading(Action configureAction) + { + ArgumentNullException.ThrowIfNull(configureAction); + _typeLoadingOptionsBuilderCallbacks ??= new(); + _typeLoadingOptionsBuilderCallbacks.Add(configureAction); + return this; + } + + /// + /// Configures OpenTelemetry tracing options. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder ConfigureTracing(Action configureAction) + { + ArgumentNullException.ThrowIfNull(configureAction); + _tracingOptionsBuilderCallbacks ??= new(); + _tracingOptionsBuilderCallbacks.Add(configureAction); + return this; + } + + /// + /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. + /// + /// Options to customize JSON serialization and deserialization. + /// The same builder instance so that multiple calls can be chained. 
+ public NpgsqlSlimDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + { + ArgumentNullException.ThrowIfNull(serializerOptions); + JsonSerializerOptions = serializerOptions; + return this; + } + + #region Authentication + + /// + /// When using SSL/TLS, this is a callback that allows customizing how the PostgreSQL-provided certificate is verified. This is an + /// advanced API, consider using or instead. + /// + /// The callback containing custom callback verification logic. + /// + /// + /// Cannot be used in conjunction with , or + /// . + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] + public NpgsqlSlimDataSourceBuilder UseUserCertificateValidationCallback( + RemoteCertificateValidationCallback userCertificateValidationCallback) + { + _userCertificateValidationCallback = userCertificateValidationCallback; + + return this; + } + + /// + /// Specifies an SSL/TLS certificate which Npgsql will send to PostgreSQL for certificate-based authentication. + /// + /// The client certificate to be sent to PostgreSQL when opening a connection. + /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] + public NpgsqlSlimDataSourceBuilder UseClientCertificate(X509Certificate? clientCertificate) + { + if (clientCertificate is null) + return UseClientCertificatesCallback(null); + + var clientCertificates = new X509CertificateCollection { clientCertificate }; + return UseClientCertificates(clientCertificates); + } + + /// + /// Specifies a collection of SSL/TLS certificates which Npgsql will send to PostgreSQL for certificate-based authentication. + /// + /// The client certificate collection to be sent to PostgreSQL when opening a connection. + /// The same builder instance so that multiple calls can be chained. 
+ [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] + public NpgsqlSlimDataSourceBuilder UseClientCertificates(X509CertificateCollection? clientCertificates) + => UseClientCertificatesCallback(clientCertificates is null ? null : certs => certs.AddRange(clientCertificates)); + + /// + /// When using SSL/TLS, this is a callback that allows customizing SslStream's authentication options. + /// + /// The callback to customize SslStream's authentication options. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseSslClientAuthenticationOptionsCallback(Action? sslClientAuthenticationOptionsCallback) + { + _sslClientAuthenticationOptionsCallback = sslClientAuthenticationOptionsCallback; + + return this; + } + + /// + /// Specifies a callback to modify the collection of SSL/TLS client certificates which Npgsql will send to PostgreSQL for + /// certificate-based authentication. This is an advanced API, consider using or + /// instead. + /// + /// The callback to modify the client certificate collection. + /// + /// + /// The callback is invoked every time a physical connection is opened, and is therefore suitable for rotating short-lived client + /// certificates. Simply make sure the certificate collection argument has the up-to-date certificate(s). + /// + /// + /// The callback's collection argument already includes any client certificates specified via the connection string or environment + /// variables. + /// + /// + /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] + public NpgsqlSlimDataSourceBuilder UseClientCertificatesCallback(Action? clientCertificatesCallback) + { + _clientCertificatesCallback = clientCertificatesCallback; + + return this; + } + + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificate. 
+ /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertificate) + => rootCertificate is null + ? UseRootCertificatesCallback((Func?)null) + : UseRootCertificateCallback(() => rootCertificate); + + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificates. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificates(X509Certificate2Collection? rootCertificates) + => rootCertificates is null + ? UseRootCertificatesCallback((Func?)null) + : UseRootCertificatesCallback(() => rootCertificates); + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificate. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. + /// When that's not the case, use the overload which directly accepts the certificate. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificateCallback(Func? rootCertificateCallback) + { + _transportSecurityHandler.RootCertificatesCallback = () => rootCertificateCallback is not null + ? new X509Certificate2Collection(rootCertificateCallback()) + : null; + + return this; + } + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificates. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. 
+ /// When that's not the case, use the overload which directly accepts the certificate. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificatesCallback(Func? rootCertificateCallback) + { + _transportSecurityHandler.RootCertificatesCallback = rootCertificateCallback; + + return this; + } + + /// + /// Configures a periodic password provider, which is automatically called by the data source at some regular interval. This is the + /// recommended way to fetch a rotating access token. + /// + /// A callback which returns the password to be sent to PostgreSQL. + /// How long to cache the password before re-invoking the callback. + /// + /// If a password refresh attempt fails, it will be re-attempted with this interval. + /// This should typically be much lower than . + /// + /// The same builder instance so that multiple calls can be chained. + /// + /// + /// The provided callback is invoked in a timer, and not when opening connections. It therefore doesn't affect opening time. + /// + /// + /// The provided cancellation token is only triggered when the entire data source is disposed. If you'd like to apply a timeout to the + /// token fetching, do so within the provided callback. + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( + Func>? 
passwordProvider, + TimeSpan successRefreshInterval, + TimeSpan failureRefreshInterval) + { + if (successRefreshInterval < TimeSpan.Zero) + throw new ArgumentException( + string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(successRefreshInterval)), nameof(successRefreshInterval)); + if (failureRefreshInterval < TimeSpan.Zero) + throw new ArgumentException( + string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(failureRefreshInterval)), nameof(failureRefreshInterval)); + + _periodicPasswordProvider = passwordProvider; + _periodicPasswordSuccessRefreshInterval = successRefreshInterval; + _periodicPasswordFailureRefreshInterval = failureRefreshInterval; + + return this; + } + + /// + /// Configures a password provider, which is called by the data source when opening connections. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// The same builder instance so that multiple calls can be chained. + /// + /// + /// The provided callback is invoked when opening connections. Therefore its important the callback internally depends on cached + /// data or returns quickly otherwise. Any unnecessary delay will affect connection opening time. + /// + /// + public NpgsqlSlimDataSourceBuilder UsePasswordProvider( + Func? passwordProvider, + Func>? passwordProviderAsync) + { + if (passwordProvider is null != passwordProviderAsync is null) + throw new ArgumentException(NpgsqlStrings.SyncAndAsyncPasswordProvidersRequired); + + _passwordProvider = passwordProvider; + _passwordProviderAsync = passwordProviderAsync; + return this; + } + + /// + /// When using Kerberos, this is a callback that allows customizing default settings for Kerberos authentication. + /// + /// The callback containing logic to customize Kerberos authentication settings. + /// + /// + /// See . 
+ /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseNegotiateOptionsCallback(Action? negotiateOptionsCallback) + { + _negotiateOptionsCallback = negotiateOptionsCallback; + + return this; + } + + #endregion Authentication + + #region Type mapping + + /// + public INpgsqlNameTranslator DefaultNameTranslator + { + get => _userTypeMapper.DefaultNameTranslator; + set => _userTypeMapper.DefaultNameTranslator = value; + } + + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET enum type to be mapped + public NpgsqlSlimDataSourceBuilder MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + { + _userTypeMapper.MapEnum(pgName, nameTranslator); + return this; + } + + /// + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + => _userTypeMapper.UnmapEnum(pgName, nameTranslator); + + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. 
+ /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public NpgsqlSlimDataSourceBuilder MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + return this; + } + + /// + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); + + /// + /// Maps a CLR type to a PostgreSQL composite type. + /// + /// + /// CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your members to manually specify a PostgreSQL name. + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. 
+ /// + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET type to be mapped + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public NpgsqlSlimDataSourceBuilder MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + _userTypeMapper.MapComposite(typeof(T), pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _userTypeMapper.UnmapComposite(typeof(T), pgName, nameTranslator); + + /// + /// Maps a CLR type to a composite type. + /// + /// + /// Maps CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. + /// + /// The .NET type to be mapped. 
+ /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public NpgsqlSlimDataSourceBuilder MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + + /// + void INpgsqlTypeMapper.AddDbTypeResolverFactory(DbTypeResolverFactory factory) + => (_dbTypeResolverFactories ??= new()).Add(factory); + + /// + [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) + => _resolverChainBuilder.PrependResolverFactory(factory); + + /// + void INpgsqlTypeMapper.Reset() => _resolverChainBuilder.Clear(); + + internal Action> ConfigureResolverChain { get; set; } + internal void AppendResolverFactory(PgTypeInfoResolverFactory factory) + => _resolverChainBuilder.AppendResolverFactory(factory); + internal void AppendResolverFactory(Func factory) where T : PgTypeInfoResolverFactory + => _resolverChainBuilder.AppendResolverFactory(factory); + + internal void AppendDefaultFactories() + { + // When used publicly we start off with our slim defaults. + _resolverChainBuilder.AppendResolverFactory(_userTypeMapper); + if (GlobalTypeMapper.Instance.GetUserMappingsResolverFactory() is { } userMappingsResolverFactory) + _resolverChainBuilder.AppendResolverFactory(userMappingsResolverFactory); + foreach (var factory in GlobalTypeMapper.Instance.GetPluginResolverFactories()) + _resolverChainBuilder.AppendResolverFactory(factory); + _resolverChainBuilder.AppendResolverFactory(new AdoTypeInfoResolverFactory()); + } + + #endregion Type mapping + + #region Optional opt-ins + + /// + /// Sets up mappings for the PostgreSQL array types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableArrays() + { + _resolverChainBuilder.EnableArrays(); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL range types. + /// + /// The same builder instance so that multiple calls can be chained. 
+ public NpgsqlSlimDataSourceBuilder EnableRanges() + { + _resolverChainBuilder.EnableRanges(); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL multirange types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableMultiranges() + { + _resolverChainBuilder.EnableMultiranges(); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET object[]. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableRecords() + { + AddTypeInfoResolverFactory(new RecordTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL tsquery and tsvector types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() + { + AddTypeInfoResolverFactory(new FullTextSearchTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL ltree extension types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableLTree() + { + AddTypeInfoResolverFactory(new LTreeTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL cube extension type. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableCube() + { + AddTypeInfoResolverFactory(new CubeTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings for extra conversions from PostgreSQL to .NET types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableExtraConversions() + { + AddTypeInfoResolverFactory(new ExtraConversionResolverFactory()); + return this; + } + + /// + /// Enables the possibility to use TLS/SSl encryption for connections to PostgreSQL. 
This does not guarantee that encryption will + /// actually be used; see for more details. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableTransportSecurity() + { + _transportSecurityHandler = new RealTransportSecurityHandler(); + return this; + } + + /// + /// Enables the possibility to use GSS/SSPI authentication and encryption for connections to PostgreSQL. This does not guarantee that it will + /// actually be used; see for more details. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableIntegratedSecurity() + { + _integratedSecurityHandler = new RealIntegratedSecurityHandler(); + return this; + } + + /// + /// Sets up network mappings. This allows mapping PhysicalAddress, IPAddress, NpgsqlInet and NpgsqlCidr types + /// to PostgreSQL macaddr, macaddr8, inet and cidr types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableNetworkTypes() + { + _resolverChainBuilder.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up network mappings. This allows mapping types like NpgsqlPoint and NpgsqlPath + /// to PostgreSQL point, path and so on types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableGeometricTypes() + { + _resolverChainBuilder.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up System.Text.Json mappings. This allows mapping JsonDocument and JsonElement types to PostgreSQL json and jsonb + /// types. + /// + /// The same builder instance so that multiple calls can be chained. 
+ public NpgsqlSlimDataSourceBuilder EnableJsonTypes() + { + _resolverChainBuilder.AppendResolverFactory(() => new JsonTypeInfoResolverFactory(JsonSerializerOptions)); + return this; + } + + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public NpgsqlSlimDataSourceBuilder EnableDynamicJson( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + _resolverChainBuilder.AppendResolverFactory(() => new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, JsonSerializerOptions)); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlSlimDataSourceBuilder EnableRecordsAsTuples() + { + AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. 
+ /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlSlimDataSourceBuilder EnableUnmappedTypes() + { + AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); + return this; + } + + #endregion Optional opt-ins + + /// + /// Register a connection initializer, which allows executing arbitrary commands when a physical database connection is first opened. + /// + /// + /// A synchronous connection initialization lambda, which will be called from when a new physical + /// connection is opened. + /// + /// + /// An asynchronous connection initialization lambda, which will be called from + /// when a new physical connection is opened. + /// + /// + /// If an initializer is registered, both sync and async versions must be provided. If you do not use sync APIs in your code, simply + /// throw , which would also catch accidental cases of sync opening. + /// + /// + /// Take care that the setting you apply in the initializer does not get reverted when the connection is returned to the pool, since + /// Npgsql sends DISCARD ALL by default. The option can be used to + /// turn this off. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UsePhysicalConnectionInitializer( + Action? connectionInitializer, + Func? 
connectionInitializerAsync) + { + if (connectionInitializer is null != connectionInitializerAsync is null) + throw new ArgumentException(NpgsqlStrings.SyncAndAsyncConnectionInitializersRequired); + + _connectionInitializer = connectionInitializer; + _connectionInitializerAsync = connectionInitializerAsync; + + return this; + } + + /// + /// Builds and returns an which is ready for use. + /// + public NpgsqlDataSource Build() + { + ConnectionStringBuilder.PostProcessAndValidate(); + var (connectionStringBuilder, config) = PrepareConfiguration(); + + if (ConnectionStringBuilder.Host!.Contains(',')) + { + ValidateMultiHost(); + + return new NpgsqlMultiHostDataSource(connectionStringBuilder, config); + } + + return ConnectionStringBuilder.Pooling + ? new PoolingDataSource(connectionStringBuilder, config) + : new UnpooledDataSource(connectionStringBuilder, config); + } + + /// + /// Builds and returns a which is ready for use for load-balancing and failover scenarios. + /// + public NpgsqlMultiHostDataSource BuildMultiHost() + { + ConnectionStringBuilder.PostProcessAndValidate(); + var (connectionStringBuilder, config) = PrepareConfiguration(); + + ValidateMultiHost(); + + return new(connectionStringBuilder, config); + } + + // Used in testing. 
+ internal (NpgsqlConnectionStringBuilder, NpgsqlDataSourceConfiguration) PrepareConfiguration() + { + var connectionStringBuilder = ConnectionStringBuilder.Clone(); + + var sslClientAuthenticationOptionsCallback = _sslClientAuthenticationOptionsCallback; + var hasCertificateCallbacks = _userCertificateValidationCallback is not null || _clientCertificatesCallback is not null; + if (sslClientAuthenticationOptionsCallback is not null && hasCertificateCallbacks) + { + throw new NotSupportedException(NpgsqlStrings.SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported); + } + + if (sslClientAuthenticationOptionsCallback is null && hasCertificateCallbacks) + { + sslClientAuthenticationOptionsCallback = options => + { + if (_clientCertificatesCallback is not null) + { + options.ClientCertificates ??= new X509Certificate2Collection(); + _clientCertificatesCallback.Invoke(options.ClientCertificates); + } + + if (_userCertificateValidationCallback is not null) + { + options.RemoteCertificateValidationCallback = _userCertificateValidationCallback; + } + }; + } + + if (!_transportSecurityHandler.SupportEncryption && sslClientAuthenticationOptionsCallback is not null) + { + throw new InvalidOperationException(NpgsqlStrings.TransportSecurityDisabled); + } + + if (_passwordProvider is not null && _periodicPasswordProvider is not null) + { + throw new NotSupportedException(NpgsqlStrings.CannotSetMultiplePasswordProviderKinds); + } + + if ((_passwordProvider is not null || _periodicPasswordProvider is not null) && + (connectionStringBuilder.Password is not null || connectionStringBuilder.Passfile is not null)) + { + throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); + } + + ConfigureDefaultFactories(this); + + var typeLoadingOptionsBuilder = new NpgsqlTypeLoadingOptionsBuilder(); +#pragma warning disable CS0618 // Type or member is obsolete + 
typeLoadingOptionsBuilder.EnableTableCompositesLoading(connectionStringBuilder.LoadTableComposites); + typeLoadingOptionsBuilder.EnableTypeLoading(connectionStringBuilder.ServerCompatibilityMode is not ServerCompatibilityMode.NoTypeLoading); +#pragma warning restore CS0618 // Type or member is obsolete + foreach (var callback in _typeLoadingOptionsBuilderCallbacks ?? (IEnumerable>)[]) + callback.Invoke(typeLoadingOptionsBuilder); + var typeLoadingOptions = typeLoadingOptionsBuilder.Build(); + + var tracingOptionsBuilder = new NpgsqlTracingOptionsBuilder(); + foreach (var callback in _tracingOptionsBuilderCallbacks ?? (IEnumerable>)[]) + callback.Invoke(tracingOptionsBuilder); + var tracingOptions = tracingOptionsBuilder.Build(); + + return (connectionStringBuilder, new( + Name, + _loggerFactory is null + ? NpgsqlLoggingConfiguration.NullConfiguration + : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), + tracingOptions, + typeLoadingOptions, + _transportSecurityHandler, + _integratedSecurityHandler, + sslClientAuthenticationOptionsCallback, + _passwordProvider, + _passwordProviderAsync, + _periodicPasswordProvider, + _periodicPasswordSuccessRefreshInterval, + _periodicPasswordFailureRefreshInterval, + _resolverChainBuilder.Build(ConfigureResolverChain), + _dbTypeResolverFactories ?? 
[], + DefaultNameTranslator, + _connectionInitializer, + _connectionInitializerAsync, + _negotiateOptionsCallback)); + } + + void ValidateMultiHost() + { + if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) + throw new NotSupportedException("Replication is not supported with multiple hosts"); + } + + INpgsqlTypeMapper INpgsqlTypeMapper.ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + => ConfigureJsonOptions(serializerOptions); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode( + "Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableDynamicJson(Type[]? jsonbClrTypes, Type[]? jsonClrTypes) + => EnableDynamicJson(jsonbClrTypes, jsonClrTypes); + + [RequiresUnreferencedCode( + "The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableRecordsAsTuples() + => EnableRecordsAsTuples(); + + [RequiresUnreferencedCode( + "The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableUnmappedTypes() + => EnableUnmappedTypes(); + + /// + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName, INpgsqlNameTranslator? 
nameTranslator) + { + _userTypeMapper.MapEnum(pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _userTypeMapper.MapComposite(typeof(T), pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName, INpgsqlNameTranslator? 
nameTranslator) + { + _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); + return this; + } +} diff --git a/src/Npgsql/NpgsqlSqlEventSource.cs b/src/Npgsql/NpgsqlSqlEventSource.cs index d8dc66d157..1e37a2355f 100644 --- a/src/Npgsql/NpgsqlSqlEventSource.cs +++ b/src/Npgsql/NpgsqlSqlEventSource.cs @@ -1,5 +1,4 @@ using System.Diagnostics.Tracing; -using System.Runtime.CompilerServices; namespace Npgsql; @@ -22,9 +21,8 @@ internal NpgsqlSqlEventSource() : base(EventSourceName) {} // - A stop event's event id must be next one after its start event. [Event(CommandStartId, Level = EventLevel.Informational)] - public void CommandStart(string sql) => Log.WriteEvent(CommandStartId, sql); + public void CommandStart(string sql) => WriteEvent(CommandStartId, sql); - [MethodImpl(MethodImplOptions.NoInlining)] [Event(CommandStopId, Level = EventLevel.Informational)] - public void CommandStop() => Log.WriteEvent(CommandStopId); -} \ No newline at end of file + public void CommandStop() => WriteEvent(CommandStopId); +} diff --git a/src/Npgsql/NpgsqlTracingOptions.cs b/src/Npgsql/NpgsqlTracingOptions.cs deleted file mode 100644 index 4aa61beec6..0000000000 --- a/src/Npgsql/NpgsqlTracingOptions.cs +++ /dev/null @@ -1,9 +0,0 @@ -namespace Npgsql; - -/// -/// Options to configure Npgsql's support for OpenTelemetry tracing. -/// Currently no options are available. -/// -public class NpgsqlTracingOptions -{ -} \ No newline at end of file diff --git a/src/Npgsql/NpgsqlTracingOptionsBuilder.cs b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs new file mode 100644 index 0000000000..d38cf1d32d --- /dev/null +++ b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs @@ -0,0 +1,164 @@ +using System; +using System.Diagnostics; + +namespace Npgsql; + +/// +/// A builder to configure Npgsql's support for OpenTelemetry tracing. +/// +public sealed class NpgsqlTracingOptionsBuilder +{ + Func? _commandFilter; + Func? _batchFilter; + Action? _commandEnrichmentCallback; + Action? 
_batchEnrichmentCallback; + Func? _commandSpanNameProvider; + Func? _batchSpanNameProvider; + bool _enableFirstResponseEvent = true; + bool _enablePhysicalOpenTracing = true; + + Func? _copyOperationFilter; + Action? _copyOperationEnrichmentCallback; + Func? _copyOperationSpanNameProvider; + + internal NpgsqlTracingOptionsBuilder() + { + } + + /// + /// Configures a filter function that determines whether to emit tracing information for an . + /// By default, tracing information is emitted for all commands. + /// + public NpgsqlTracingOptionsBuilder ConfigureCommandFilter(Func? commandFilter) + { + _commandFilter = commandFilter; + return this; + } + + /// + /// Configures a filter function that determines whether to emit tracing information for an . + /// By default, tracing information is emitted for all batches. + /// + public NpgsqlTracingOptionsBuilder ConfigureBatchFilter(Func? batchFilter) + { + _batchFilter = batchFilter; + return this; + } + + /// + /// Configures a callback that can enrich the emitted for the given . + /// + public NpgsqlTracingOptionsBuilder ConfigureCommandEnrichmentCallback(Action? commandEnrichmentCallback) + { + _commandEnrichmentCallback = commandEnrichmentCallback; + return this; + } + + /// + /// Configures a callback that can enrich the emitted for the given . + /// + public NpgsqlTracingOptionsBuilder ConfigureBatchEnrichmentCallback(Action? batchEnrichmentCallback) + { + _batchEnrichmentCallback = batchEnrichmentCallback; + return this; + } + + /// + /// Configures a callback that provides the tracing span's name for an . If null, the default standard + /// span name is used, which is the database name. + /// + public NpgsqlTracingOptionsBuilder ConfigureCommandSpanNameProvider(Func? commandSpanNameProvider) + { + _commandSpanNameProvider = commandSpanNameProvider; + return this; + } + + /// + /// Configures a callback that provides the tracing span's name for an . 
If null, the default standard + /// span name is used, which is the database name. + /// + public NpgsqlTracingOptionsBuilder ConfigureBatchSpanNameProvider(Func? batchSpanNameProvider) + { + _batchSpanNameProvider = batchSpanNameProvider; + return this; + } + + /// + /// Gets or sets a value indicating whether to enable the "time-to-first-read" event. + /// Default is true to preserve existing behavior. + /// + public NpgsqlTracingOptionsBuilder EnableFirstResponseEvent(bool enable = true) + { + _enableFirstResponseEvent = enable; + return this; + } + + /// + /// Gets or sets a value indicating whether to trace physical connection open. + /// Default is true to preserve existing behavior. + /// + public NpgsqlTracingOptionsBuilder EnablePhysicalOpenTracing(bool enable = true) + { + _enablePhysicalOpenTracing = enable; + return this; + } + + /// + /// Configures a filter function that determines whether to emit tracing information for a copy operation. + /// By default, tracing information is emitted for all copy operations. + /// + public NpgsqlTracingOptionsBuilder ConfigureCopyOperationFilter(Func? copyOperationFilter) + { + _copyOperationFilter = copyOperationFilter; + return this; + } + + /// + /// Configures a callback that can enrich the emitted for a given copy operation. + /// + public NpgsqlTracingOptionsBuilder ConfigureCopyOperationEnrichmentCallback(Action? copyOperationEnrichmentCallback) + { + _copyOperationEnrichmentCallback = copyOperationEnrichmentCallback; + return this; + } + + /// + /// Configures a callback that provides the tracing span's name for a copy operation. If null, the default standard + /// span name is used, which is the database name. + /// + public NpgsqlTracingOptionsBuilder ConfigureCopyOperationSpanNameProvider(Func? 
copyOperationSpanNameProvider) + { + _copyOperationSpanNameProvider = copyOperationSpanNameProvider; + return this; + } + + internal NpgsqlTracingOptions Build() => new() + { + CommandFilter = _commandFilter, + BatchFilter = _batchFilter, + CommandEnrichmentCallback = _commandEnrichmentCallback, + BatchEnrichmentCallback = _batchEnrichmentCallback, + CommandSpanNameProvider = _commandSpanNameProvider, + BatchSpanNameProvider = _batchSpanNameProvider, + EnableFirstResponseEvent = _enableFirstResponseEvent, + EnablePhysicalOpenTracing = _enablePhysicalOpenTracing, + CopyOperationFilter = _copyOperationFilter, + CopyOperationEnrichmentCallback = _copyOperationEnrichmentCallback, + CopyOperationSpanNameProvider = _copyOperationSpanNameProvider + }; +} + +sealed class NpgsqlTracingOptions +{ + internal Func? CommandFilter { get; init; } + internal Func? BatchFilter { get; init; } + internal Action? CommandEnrichmentCallback { get; init; } + internal Action? BatchEnrichmentCallback { get; init; } + internal Func? CommandSpanNameProvider { get; init; } + internal Func? BatchSpanNameProvider { get; init; } + internal bool EnableFirstResponseEvent { get; init; } + internal bool EnablePhysicalOpenTracing { get; init; } + internal Func? CopyOperationFilter { get; init; } + internal Action? CopyOperationEnrichmentCallback { get; init; } + internal Func? 
CopyOperationSpanNameProvider { get; init; } +} diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 0f0cb20fc6..14254bdccc 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -132,7 +132,7 @@ async Task Commit(bool async, CancellationToken cancellationToken = default) using (_connector.StartUserAction(cancellationToken)) { - await _connector.ExecuteInternalCommand(PregeneratedMessages.CommitTransaction, async, cancellationToken); + await _connector.ExecuteInternalCommand(PregeneratedMessages.CommitTransaction, async, cancellationToken).ConfigureAwait(false); LogMessages.CommittedTransaction(_transactionLogger, _connector.Id); } } @@ -143,15 +143,8 @@ async Task Commit(bool async, CancellationToken cancellationToken = default) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NETSTANDARD2_0 - public Task CommitAsync(CancellationToken cancellationToken = default) -#else public override Task CommitAsync(CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Commit(true, cancellationToken); - } + => Commit(async: true, cancellationToken); #endregion @@ -171,7 +164,7 @@ async Task Rollback(bool async, CancellationToken cancellationToken = default) using (_connector.StartUserAction(cancellationToken)) { - await _connector.Rollback(async, cancellationToken); + await _connector.Rollback(async, cancellationToken).ConfigureAwait(false); LogMessages.RolledBackTransaction(_transactionLogger, _connector.Id); } } @@ -182,15 +175,8 @@ async Task Rollback(bool async, CancellationToken cancellationToken = default) /// /// An optional token to cancel the asynchronous operation. The default value is . 
/// -#if NETSTANDARD2_0 - public Task RollbackAsync(CancellationToken cancellationToken = default) -#else public override Task RollbackAsync(CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Rollback(true, cancellationToken); - } + => Rollback(async: true, cancellationToken); #endregion @@ -204,16 +190,9 @@ public override Task RollbackAsync(CancellationToken cancellationToken = default /// This method does not cause a database roundtrip to be made. The savepoint creation statement will instead be sent along with /// the next command. /// -#if NET5_0_OR_GREATER public override void Save(string name) -#else - public void Save(string name) -#endif { - if (name == null) - throw new ArgumentNullException(nameof(name)); - if (string.IsNullOrWhiteSpace(name)) - throw new ArgumentException("name can't be empty", nameof(name)); + ArgumentException.ThrowIfNullOrWhiteSpace(name); CheckReady(); if (!_connector.DatabaseInfo.SupportsTransactions) @@ -230,16 +209,7 @@ public void Save(string name) // Note: savepoint names are PostgreSQL identifiers, and so limited by default to 63 characters. // Since we are prepending, we assume below that the statement will always fit in the buffer. - _connector.WriteBuffer.WriteByte(FrontendMessageCode.Query); - _connector.WriteBuffer.WriteInt32( - sizeof(int) + // Message length (including self excluding code) - _connector.TextEncoding.GetByteCount("SAVEPOINT ") + - _connector.TextEncoding.GetByteCount(name) + - sizeof(byte)); // Null terminator - - _connector.WriteBuffer.WriteString("SAVEPOINT "); - _connector.WriteBuffer.WriteString(name); - _connector.WriteBuffer.WriteByte(0); + _connector.WriteQuery("SAVEPOINT " + name, async: false).GetAwaiter().GetResult(); _connector.PendingPrependedResponses += 2; } @@ -255,22 +225,15 @@ public void Save(string name) /// This method does not cause a database roundtrip to be made, and will therefore always complete synchronously. 
/// The savepoint creation statement will instead be sent along with the next command. /// -#if NET5_0_OR_GREATER public override Task SaveAsync(string name, CancellationToken cancellationToken = default) -#else - public Task SaveAsync(string name, CancellationToken cancellationToken = default) -#endif { Save(name); return Task.CompletedTask; } - async Task Rollback(string name, bool async, CancellationToken cancellationToken = default) + async Task Rollback(bool async, string name, CancellationToken cancellationToken = default) { - if (name == null) - throw new ArgumentNullException(nameof(name)); - if (string.IsNullOrWhiteSpace(name)) - throw new ArgumentException("name can't be empty", nameof(name)); + ArgumentException.ThrowIfNullOrWhiteSpace(name); CheckReady(); if (!_connector.DatabaseInfo.SupportsTransactions) @@ -278,7 +241,7 @@ async Task Rollback(string name, bool async, CancellationToken cancellationToken using (_connector.StartUserAction(cancellationToken)) { var quotedName = RequiresQuoting(name) ? $"\"{name.Replace("\"", "\"\"")}\"" : name; - await _connector.ExecuteInternalCommand($"ROLLBACK TO SAVEPOINT {quotedName}", async, cancellationToken); + await _connector.ExecuteInternalCommand($"ROLLBACK TO SAVEPOINT {quotedName}", async, cancellationToken).ConfigureAwait(false); LogMessages.RolledBackToSavepoint(_transactionLogger, name, _connector.Id); } } @@ -287,12 +250,8 @@ async Task Rollback(string name, bool async, CancellationToken cancellationToken /// Rolls back a transaction from a pending savepoint state. /// /// The name of the savepoint. -#if NET5_0_OR_GREATER public override void Rollback(string name) -#else - public void Rollback(string name) -#endif - => Rollback(name, false).GetAwaiter().GetResult(); + => Rollback(async: false, name).GetAwaiter().GetResult(); /// /// Rolls back a transaction from a pending savepoint state. 
@@ -301,22 +260,12 @@ public void Rollback(string name) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NET5_0_OR_GREATER public override Task RollbackAsync(string name, CancellationToken cancellationToken = default) -#else - public Task RollbackAsync(string name, CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Rollback(name, true, cancellationToken); - } + => Rollback(async: true, name, cancellationToken); - async Task Release(string name, bool async, CancellationToken cancellationToken = default) + async Task Release(bool async, string name, CancellationToken cancellationToken = default) { - if (name == null) - throw new ArgumentNullException(nameof(name)); - if (string.IsNullOrWhiteSpace(name)) - throw new ArgumentException("name can't be empty", nameof(name)); + ArgumentException.ThrowIfNullOrWhiteSpace(name); CheckReady(); if (!_connector.DatabaseInfo.SupportsTransactions) @@ -324,7 +273,7 @@ async Task Release(string name, bool async, CancellationToken cancellationToken using (_connector.StartUserAction(cancellationToken)) { var quotedName = RequiresQuoting(name) ? $"\"{name.Replace("\"", "\"\"")}\"" : name; - await _connector.ExecuteInternalCommand($"RELEASE SAVEPOINT {quotedName}", async, cancellationToken); + await _connector.ExecuteInternalCommand($"RELEASE SAVEPOINT {quotedName}", async, cancellationToken).ConfigureAwait(false); LogMessages.ReleasedSavepoint(_transactionLogger, name, _connector.Id); } } @@ -333,11 +282,8 @@ async Task Release(string name, bool async, CancellationToken cancellationToken /// Releases a transaction from a pending savepoint state. /// /// The name of the savepoint. 
-#if NET5_0_OR_GREATER - public override void Release(string name) => Release(name, false).GetAwaiter().GetResult(); -#else - public void Release(string name) => Release(name, false).GetAwaiter().GetResult(); -#endif + public override void Release(string name) + => Release(async: false, name).GetAwaiter().GetResult(); /// /// Releases a transaction from a pending savepoint state. @@ -346,15 +292,13 @@ async Task Release(string name, bool async, CancellationToken cancellationToken /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NET5_0_OR_GREATER public override Task ReleaseAsync(string name, CancellationToken cancellationToken = default) -#else - public Task ReleaseAsync(string name, CancellationToken cancellationToken = default) -#endif - { - using (NoSynchronizationContextScope.Enter()) - return Release(name, true, cancellationToken); - } + => Release(async: true, name, cancellationToken); + + /// + /// Indicates whether this transaction supports database savepoints. + /// + public override bool SupportsSavepoints => _connector.DatabaseInfo.SupportsTransactions; #endregion @@ -384,30 +328,24 @@ protected override void Dispose(bool disposing) } IsDisposed = true; - _connector?.Connection?.EndBindingScope(ConnectorBindingScope.Transaction); } } /// /// Disposes the transaction, rolling it back if it is still pending. 
/// -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else public override ValueTask DisposeAsync() -#endif { if (!IsDisposed) { if (!IsCompleted) { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncInternal(); + return DisposeAsyncInternal(); } IsDisposed = true; - _connector?.Connection?.EndBindingScope(ConnectorBindingScope.Transaction); } + return default; async ValueTask DisposeAsyncInternal() @@ -415,8 +353,8 @@ async ValueTask DisposeAsyncInternal() // We're disposing, so no cancellation token try { - await _connector.CloseOngoingOperations(async: true); - await Rollback(async: true); + await _connector.CloseOngoingOperations(async: true).ConfigureAwait(false); + await Rollback(async: true).ConfigureAwait(false); } catch (Exception ex) { @@ -425,7 +363,6 @@ async ValueTask DisposeAsyncInternal() } IsDisposed = true; - _connector?.Connection?.EndBindingScope(ConnectorBindingScope.Transaction); } } @@ -447,13 +384,13 @@ void CheckReady() { CheckDisposed(); if (IsCompleted) - throw new InvalidOperationException("This NpgsqlTransaction has completed; it is no longer usable."); + ThrowHelper.ThrowInvalidOperationException("This NpgsqlTransaction has completed; it is no longer usable."); } void CheckDisposed() { if (IsDisposed) - throw new ObjectDisposedException(typeof(NpgsqlTransaction).Name, _disposeReason); + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlTransaction), _disposeReason); } static bool RequiresQuoting(string identifier) @@ -500,4 +437,4 @@ internal void UnbindIfNecessary() } #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypeLoadingOptions.cs b/src/Npgsql/NpgsqlTypeLoadingOptions.cs new file mode 100644 index 0000000000..c031826675 --- /dev/null +++ b/src/Npgsql/NpgsqlTypeLoadingOptions.cs @@ -0,0 +1,114 @@ +using System; +using System.Collections.Generic; + +namespace Npgsql; + +/// +/// Options for configuring Npgsql type loading. 
+/// +sealed class NpgsqlTypeLoadingOptions +{ + /// + /// Load table composite type definitions, and not just free-standing composite types. + /// + public required bool LoadTableComposites { get; init; } + + /// + /// When false, if the server doesn't support full type loading from the PostgreSQL catalogs, + /// support the basic set of types via information hardcoded inside Npgsql. + /// + public required bool LoadTypes { get; init; } = true; + + /// + /// Load type definitions from the given schemas. + /// + public required string[]? TypeLoadingSchemas { get; init; } +} + +/// +/// Options builder for configuring Npgsql type loading. +/// +public sealed class NpgsqlTypeLoadingOptionsBuilder +{ + bool _loadTableComposites; + bool _loadTypes = true; + List? _typeLoadingSchemas; + + internal NpgsqlTypeLoadingOptionsBuilder() {} + + /// + /// Enable loading table composite type definitions, and not just free-standing composite types. + /// + public NpgsqlTypeLoadingOptionsBuilder EnableTableCompositesLoading(bool enable = true) + { + _loadTableComposites = enable; + return this; + } + + /// + /// Enable loading of types, when disabled Npgsql falls back to a small, builtin, set of known types and type ids. + /// + public NpgsqlTypeLoadingOptionsBuilder EnableTypeLoading(bool enable = true) + { + _loadTypes = enable; + return this; + } + + /// + /// Set the schemas to load types from, this can be used to reduce the work done during type loading. + /// + /// Npgsql will always load types from the following schemas: pg_catalog, information_schema, pg_toast. + /// Any user-defined types (typcategory 'U') will also be loaded regardless of their schema. + /// Schemas to load types from. + public NpgsqlTypeLoadingOptionsBuilder SetTypeLoadingSchemas(params IEnumerable? 
schemas) + { + if (schemas is null) + { + _typeLoadingSchemas = null; + return this; + } + + _typeLoadingSchemas = new(); + foreach (var schema in schemas) + { + if (schema is not { Length: > 0 }) + { + _typeLoadingSchemas = null; + throw new ArgumentException("Schema cannot be null or empty."); + } + _typeLoadingSchemas.Add(schema); + } + + return this; + } + + internal NpgsqlTypeLoadingOptions Build() => new() + { + LoadTableComposites = _loadTableComposites, + LoadTypes = _loadTypes, + TypeLoadingSchemas = _typeLoadingSchemas?.ToArray() + }; +} + +/// +/// An option specified in the connection string that activates special compatibility features. +/// +public enum ServerCompatibilityMode +{ + /// + /// No special server compatibility mode is active + /// + None, + + /// + /// The server is an Amazon Redshift instance. + /// + [Obsolete("ServerCompatibilityMode.Redshift no longer does anything and can be safely removed.")] + Redshift, + + /// + /// The server doesn't support full type loading from the PostgreSQL catalogs, support the basic set + /// of types via information hardcoded inside Npgsql. + /// + NoTypeLoading, +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs new file mode 100644 index 0000000000..b84953c483 --- /dev/null +++ b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs @@ -0,0 +1,251 @@ +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using System.Text; + +// ReSharper disable once CheckNamespace +namespace NpgsqlTypes; + +/// +/// Represents a PostgreSQL cube data type. +/// +/// +/// See https://www.postgresql.org/docs/current/cube.html +/// +public readonly struct NpgsqlCube : IEquatable +{ + // Store the coordinates as two double arrays (lower-left and upper-right corners) + readonly double[] _lowerLeft; + readonly double[] _upperRight; + + /// + /// The lower left coordinates of the cube. 
+ /// + public IReadOnlyList LowerLeft => _lowerLeft; + + /// + /// The upper right coordinates of the cube. + /// + public IReadOnlyList UpperRight => _upperRight; + + /// + /// The number of dimensions of the cube. + /// + public int Dimensions => _lowerLeft.Length; + + /// + /// True if the cube is a point, that is, the two defining corners are the same. + /// + public bool IsPoint { get; } + + /// + /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. + /// + /// This is an internal constructor to optimize the number of allocations. + /// The lower left values. + /// The upper right values. + /// + /// Thrown if the number of dimensions in the upper left and lower right values do not match. + /// + internal NpgsqlCube(double[] lowerLeft, double[] upperRight) + { + if (lowerLeft.Length != upperRight.Length) + throw new ArgumentException($"Not a valid cube: Different point dimensions in {lowerLeft} and {upperRight}."); + + IsPoint = lowerLeft.SequenceEqual(upperRight); + _lowerLeft = lowerLeft; + _upperRight = upperRight; + } + + /// + /// Makes a one dimensional cube with both coordinates the same. + /// + /// The point coordinate. + public NpgsqlCube(double coord) + { + IsPoint = true; + _lowerLeft = [coord]; + _upperRight = _lowerLeft; + } + + /// + /// Makes a one dimensional cube. + /// + /// The lower left value. + /// The upper right value. + public NpgsqlCube(double lowerLeft, double upperRight) + { + IsPoint = lowerLeft.CompareTo(upperRight) == 0; + _lowerLeft = [lowerLeft]; + _upperRight = IsPoint ? _lowerLeft : [upperRight]; + } + + /// + /// Makes a zero-volume cube using the coordinates defined by the array. + /// + /// The coordinates. 
+ public NpgsqlCube(IEnumerable coords) + { + // Always create a defensive copy to prevent external mutation + _lowerLeft = coords.ToArray(); + IsPoint = true; + _upperRight = _lowerLeft; + } + + /// + /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. + /// + /// The lower left values. + /// The upper right values. + /// + /// Thrown if the number of dimensions in the upper left and lower right values do not match + /// or if the cube exceeds the maximum dimensions (100). + /// + public NpgsqlCube(IEnumerable lowerLeft, IEnumerable upperRight) : + this(lowerLeft.ToArray(), upperRight.ToArray()) + { } + + /// + /// Makes a new cube by adding a dimension on to an existing cube, with the same values for both endpoints of the new coordinate. + /// This is useful for building cubes piece by piece from calculated values. + /// + /// The existing cube. + /// The coordinate to add. + public NpgsqlCube(NpgsqlCube cube, double coord) + { + IsPoint = cube.IsPoint; + if (IsPoint) + { + _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); + _upperRight = _lowerLeft; + } + else + { + _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); + _upperRight = cube._upperRight.Append(coord).ToArray(); + } + } + + /// + /// Makes a new cube by adding a dimension on to an existing cube. + /// This is useful for building cubes piece by piece from calculated values. + /// + /// The existing cube. + /// The lower left value. + /// The upper right value. 
+ public NpgsqlCube(NpgsqlCube cube, double lowerLeft, double upperRight) + { + IsPoint = cube.IsPoint && lowerLeft.CompareTo(upperRight) == 0; + if (IsPoint) + { + _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); + _upperRight = _lowerLeft; + } + else + { + _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); + _upperRight = cube._upperRight.Append(upperRight).ToArray(); + } + } + + /// + /// Makes a new cube from an existing cube, using a list of dimension indexes from an array. + /// Can be used to extract the endpoints of a single dimension, or to drop dimensions, or to reorder them as desired. + /// + /// The list of dimension indexes. + /// A new cube. + /// + /// + /// var cube = new NpgsqlCube(new[] { 1, 3, 5 }, new[] { 6, 7, 8 }); // '(1,3,5),(6,7,8)' + /// cube.ToSubset(1); // '(3),(7)' + /// cube.ToSubset(2, 1, 0, 0); // '(5,3,1,1),(8,7,6,6)' + /// + /// + public NpgsqlCube ToSubset(params int[] indexes) + { + var lowerLeft = new double[indexes.Length]; + var upperRight = new double[indexes.Length]; + + for (var i = 0; i < indexes.Length; i++) + { + lowerLeft[i] = _lowerLeft[indexes[i]]; + upperRight[i] = _upperRight[indexes[i]]; + } + + return new NpgsqlCube(lowerLeft, upperRight); + } + + /// + public bool Equals(NpgsqlCube other) => Dimensions == other.Dimensions + && _lowerLeft.SequenceEqual(other._lowerLeft) + && _upperRight.SequenceEqual(other._upperRight); + + /// + public override bool Equals(object? obj) => obj is NpgsqlCube other && Equals(other); + + /// + public static bool operator ==(NpgsqlCube x, NpgsqlCube y) => x.Equals(y); + + /// + public static bool operator !=(NpgsqlCube x, NpgsqlCube y) => !(x == y); + + /// + public override int GetHashCode() + { + var hashCode = new HashCode(); + for (var i = 0; i < Dimensions; i++) + { + hashCode.Add(_lowerLeft[i]); + hashCode.Add(_upperRight[i]); + } + return hashCode.ToHashCode(); + } + + /// + /// Writes the cube in PostgreSQL's text format. 
+ /// + void Write(StringBuilder stringBuilder) + { + var leftBuilder = new StringBuilder(); + var rightBuilder = new StringBuilder(); + + leftBuilder.Append('('); + rightBuilder.Append('('); + + for (var i = 0; i < Dimensions; i++) + { + leftBuilder.Append(CultureInfo.InvariantCulture, $"{_lowerLeft[i]:G17}"); + rightBuilder.Append(CultureInfo.InvariantCulture, $"{_upperRight[i]:G17}"); + + if (i >= Dimensions - 1) continue; + + leftBuilder.Append(", "); + rightBuilder.Append(", "); + } + + leftBuilder.Append(')'); + rightBuilder.Append(')'); + + if (IsPoint) + { + stringBuilder.Append(leftBuilder); + } + else + { + stringBuilder.Append(leftBuilder); + stringBuilder.Append(','); + stringBuilder.Append(rightBuilder); + } + } + + /// + /// Writes the cube in PostgreSQL's text format. + /// + public override string ToString() + { + var sb = new StringBuilder(); + Write(sb); + return sb.ToString(); + } +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index 8df0ee874f..ab8d1480f8 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -1,6 +1,8 @@ using System; +using System.Data; using Npgsql; -using Npgsql.TypeMapping; +using Npgsql.Internal.Postgres; +using static Npgsql.Util.Statics; #pragma warning disable CA1720 @@ -27,49 +29,42 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL 8-byte "bigint" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("int8", baseOID: 20, arrayOID: 1016, rangeName: "int8range", rangeOID: 3926, multirangeName: "int8multirange", multirangeOID: 4536)] Bigint = 1, /// /// Corresponds to the PostgreSQL 8-byte floating-point "double" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("float8", baseOID: 701, arrayOID: 1022)] Double = 8, /// /// Corresponds to the PostgreSQL 4-byte "integer" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("int4", baseOID: 23, arrayOID: 1007, rangeName: "int4range", rangeOID: 3904, multirangeName: "int4multirange", multirangeOID: 4451)] Integer = 9, /// /// Corresponds to the PostgreSQL arbitrary-precision "numeric" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("numeric", baseOID: 1700, arrayOID: 1231, rangeName: "numrange", rangeOID: 3906, multirangeName: "nummultirange", multirangeOID: 4532)] Numeric = 13, /// /// Corresponds to the PostgreSQL floating-point "real" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("float4", baseOID: 700, arrayOID: 1021)] Real = 17, /// /// Corresponds to the PostgreSQL 2-byte "smallint" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("int2", baseOID: 21, arrayOID: 1005)] Smallint = 18, /// /// Corresponds to the PostgreSQL "money" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-money.html - [BuiltInPostgresType("money", baseOID: 790, arrayOID: 791)] Money = 12, #endregion @@ -80,7 +75,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "boolean" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-boolean.html - [BuiltInPostgresType("bool", baseOID: 16, arrayOID: 1000)] Boolean = 2, #endregion @@ -91,51 +85,50 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL geometric "box" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("box", baseOID: 603, arrayOID: 1020)] Box = 3, /// /// Corresponds to the PostgreSQL geometric "circle" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("circle", baseOID: 718, arrayOID: 719)] Circle = 5, /// /// Corresponds to the PostgreSQL geometric "line" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("line", baseOID: 628, arrayOID: 629)] Line = 10, /// /// Corresponds to the PostgreSQL geometric "lseg" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("lseg", baseOID: 601, arrayOID: 1018)] LSeg = 11, /// /// Corresponds to the PostgreSQL geometric "path" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("path", baseOID: 602, arrayOID: 1019)] Path = 14, /// /// Corresponds to the PostgreSQL geometric "point" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("point", baseOID: 600, arrayOID: 1017)] Point = 15, /// /// Corresponds to the PostgreSQL geometric "polygon" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("polygon", baseOID: 604, arrayOID: 1027)] Polygon = 16, + /// + /// Corresponds to the PostgreSQL "cube" type, a geometric type representing multi-dimensional cubes. + /// + /// See https://www.postgresql.org/docs/current/cube.html + Cube = 63, // Extension type + #endregion #region Character Types @@ -144,28 +137,24 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "char(n)" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("bpchar", baseOID: 1042, arrayOID: 1014)] Char = 6, /// /// Corresponds to the PostgreSQL "text" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("text", baseOID: 25, arrayOID: 1009)] Text = 19, /// /// Corresponds to the PostgreSQL "varchar" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("varchar", baseOID: 1043, arrayOID: 1015)] Varchar = 22, /// /// Corresponds to the PostgreSQL internal "name" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("name", baseOID: 19, arrayOID: 1003)] Name = 32, /// @@ -182,7 +171,6 @@ public enum NpgsqlDbType /// /// See https://www.postgresql.org/docs/current/static/datatype-text.html /// - [BuiltInPostgresType("char", baseOID: 18, arrayOID: 1002)] InternalChar = 38, #endregion @@ -193,7 +181,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "bytea" type, holding a raw byte string. /// /// See https://www.postgresql.org/docs/current/static/datatype-binary.html - [BuiltInPostgresType("bytea", baseOID: 17, arrayOID: 1001)] Bytea = 4, #endregion @@ -204,56 +191,36 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "date" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("date", baseOID: 1082, arrayOID: 1182, rangeName: "daterange", rangeOID: 3912, multirangeName: "datemultirange", multirangeOID: 4535)] Date = 7, /// /// Corresponds to the PostgreSQL "time" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("time", baseOID: 1083, arrayOID: 1183)] Time = 20, /// /// Corresponds to the PostgreSQL "timestamp" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("timestamp", baseOID: 1114, arrayOID: 1115, rangeName: "tsrange", rangeOID: 3908, multirangeName: "tsmultirange", multirangeOID: 4533)] Timestamp = 21, /// /// Corresponds to the PostgreSQL "timestamp with time zone" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [Obsolete("Use TimestampTz instead")] // NOTE: Don't remove this (see #1694) - TimestampTZ = TimestampTz, - - /// - /// Corresponds to the PostgreSQL "timestamp with time zone" type. - /// - /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("timestamptz", baseOID: 1184, arrayOID: 1185, rangeName: "tstzrange", rangeOID: 3910, multirangeName: "tstzmultirange", multirangeOID: 4534)] TimestampTz = 26, /// /// Corresponds to the PostgreSQL "interval" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("interval", baseOID: 1186, arrayOID: 1187)] Interval = 30, /// /// Corresponds to the PostgreSQL "time with time zone" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [Obsolete("Use TimeTz instead")] // NOTE: Don't remove this (see #1694) - TimeTZ = TimeTz, - - /// - /// Corresponds to the PostgreSQL "time with time zone" type. - /// - /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("timetz", baseOID: 1266, arrayOID: 1270)] TimeTz = 31, /// @@ -271,28 +238,24 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "inet" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("inet", baseOID: 869, arrayOID: 1041)] Inet = 24, /// /// Corresponds to the PostgreSQL "cidr" type, a field storing an IPv4 or IPv6 network. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("cidr", baseOID: 650, arrayOID: 651)] Cidr = 44, /// /// Corresponds to the PostgreSQL "macaddr" type, a field storing a 6-byte physical address. /// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("macaddr", baseOID: 829, arrayOID: 1040)] MacAddr = 34, /// /// Corresponds to the PostgreSQL "macaddr8" type, a field storing a 6-byte or 8-byte physical address. /// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("macaddr8", baseOID: 774, arrayOID: 775)] MacAddr8 = 54, #endregion @@ -303,14 +266,12 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "bit" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-bit.html - [BuiltInPostgresType("bit", baseOID: 1560, arrayOID: 1561)] Bit = 25, /// /// Corresponds to the PostgreSQL "varbit" type, a field storing a variable-length string of bits. /// /// See https://www.postgresql.org/docs/current/static/datatype-boolean.html - [BuiltInPostgresType("varbit", baseOID: 1562, arrayOID: 1563)] Varbit = 39, #endregion @@ -321,21 +282,18 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "tsvector" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html - [BuiltInPostgresType("tsvector", baseOID: 3614, arrayOID: 3643)] TsVector = 45, /// /// Corresponds to the PostgreSQL "tsquery" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html - [BuiltInPostgresType("tsquery", baseOID: 3615, arrayOID: 3645)] TsQuery = 46, /// /// Corresponds to the PostgreSQL "regconfig" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html - [BuiltInPostgresType("regconfig", baseOID: 3734, arrayOID: 3735)] Regconfig = 56, #endregion @@ -346,7 +304,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "uuid" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-uuid.html - [BuiltInPostgresType("uuid", baseOID: 2950, arrayOID: 2951)] Uuid = 27, #endregion @@ -357,7 +314,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "xml" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-xml.html - [BuiltInPostgresType("xml", baseOID: 142, arrayOID: 143)] Xml = 28, #endregion @@ -369,7 +325,6 @@ public enum NpgsqlDbType /// /// See https://www.postgresql.org/docs/current/static/datatype-json.html /// - [BuiltInPostgresType("json", baseOID: 114, arrayOID: 199)] Json = 35, /// @@ -380,7 +335,6 @@ public enum NpgsqlDbType /// Supported since PostgreSQL 9.4. /// See https://www.postgresql.org/docs/current/static/datatype-json.html /// - [BuiltInPostgresType("jsonb", baseOID: 3802, arrayOID: 3807)] Jsonb = 36, /// @@ -391,7 +345,6 @@ public enum NpgsqlDbType /// Supported since PostgreSQL 12. /// See https://www.postgresql.org/docs/current/datatype-json.html#DATATYPE-JSONPATH /// - [BuiltInPostgresType("jsonpath", baseOID: 4072, arrayOID: 4073)] JsonPath = 57, #endregion @@ -411,60 +364,51 @@ public enum NpgsqlDbType /// /// Corresponds to the PostgreSQL "refcursor" type. /// - [BuiltInPostgresType("refcursor", baseOID: 1790, arrayOID: 2201)] Refcursor = 23, /// /// Corresponds to the PostgreSQL internal "oidvector" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("oidvector", baseOID: 30, arrayOID: 1013)] Oidvector = 29, /// /// Corresponds to the PostgreSQL internal "int2vector" type. 
/// - [BuiltInPostgresType("int2vector", baseOID: 22, arrayOID: 1006)] Int2Vector = 52, /// /// Corresponds to the PostgreSQL "oid" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("oid", baseOID: 26, arrayOID: 1028)] Oid = 41, /// /// Corresponds to the PostgreSQL "xid" type, an internal transaction identifier. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("xid", baseOID: 28, arrayOID: 1011)] Xid = 42, /// /// Corresponds to the PostgreSQL "xid8" type, an internal transaction identifier. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("xid8", baseOID: 5069, arrayOID: 271)] Xid8 = 64, /// /// Corresponds to the PostgreSQL "cid" type, an internal command identifier. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("cid", baseOID: 29, arrayOID: 1012)] Cid = 43, /// /// Corresponds to the PostgreSQL "regtype" type, a numeric (OID) ID of a type in the pg_type table. /// - [BuiltInPostgresType("regtype", baseOID: 2206, arrayOID: 2211)] Regtype = 49, /// /// Corresponds to the PostgreSQL "tid" type, a tuple id identifying the physical location of a row within its table. /// - [BuiltInPostgresType("tid", baseOID: 27, arrayOID: 1010)] Tid = 53, /// @@ -475,7 +419,6 @@ public enum NpgsqlDbType /// See: https://www.postgresql.org/docs/current/datatype-pg-lsn.html and /// https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=7d03a83f4d0736ba869fa6f93973f7623a27038a /// - [BuiltInPostgresType("pg_lsn", baseOID: 3220, arrayOID: 3221)] PgLsn = 59, #endregion @@ -491,7 +434,6 @@ public enum NpgsqlDbType /// This value shouldn't ordinarily be used, and makes sense only when sending a data type /// unsupported by Npgsql. 
/// - [BuiltInPostgresType("unknown", baseOID: 705, arrayOID: 0)] Unknown = 40, #endregion @@ -635,38 +577,412 @@ public enum NpgsqlDbType #endregion } -/// -/// Represents a built-in PostgreSQL type as it appears in pg_type, including its name and OID. -/// Extension types with variable OIDs are not represented. -/// -sealed class BuiltInPostgresType : Attribute +static class NpgsqlDbTypeExtensions { - internal string Name { get; } - internal uint BaseOID { get; } - internal uint ArrayOID { get; } - - internal string? RangeName { get; } - internal uint RangeOID { get; } - internal string? MultirangeName { get; } - internal uint MultirangeOID { get; } - - internal BuiltInPostgresType(string name, uint baseOID, uint arrayOID) + internal static NpgsqlDbType? ToNpgsqlDbType(this DbType dbType) + => dbType switch + { + DbType.AnsiString => NpgsqlDbType.Text, + DbType.Binary => NpgsqlDbType.Bytea, + DbType.Byte => NpgsqlDbType.Smallint, + DbType.Boolean => NpgsqlDbType.Boolean, + DbType.Currency => NpgsqlDbType.Money, + DbType.Date => NpgsqlDbType.Date, + DbType.DateTime => LegacyTimestampBehavior ? 
NpgsqlDbType.Timestamp : NpgsqlDbType.TimestampTz, + DbType.Decimal => NpgsqlDbType.Numeric, + DbType.VarNumeric => NpgsqlDbType.Numeric, + DbType.Double => NpgsqlDbType.Double, + DbType.Guid => NpgsqlDbType.Uuid, + DbType.Int16 => NpgsqlDbType.Smallint, + DbType.Int32 => NpgsqlDbType.Integer, + DbType.Int64 => NpgsqlDbType.Bigint, + DbType.Single => NpgsqlDbType.Real, + DbType.String => NpgsqlDbType.Text, + DbType.Time => NpgsqlDbType.Time, + DbType.AnsiStringFixedLength => NpgsqlDbType.Text, + DbType.StringFixedLength => NpgsqlDbType.Text, + DbType.Xml => NpgsqlDbType.Xml, + DbType.DateTime2 => NpgsqlDbType.Timestamp, + DbType.DateTimeOffset => NpgsqlDbType.TimestampTz, + + DbType.Object => null, + DbType.SByte => null, + DbType.UInt16 => null, + DbType.UInt32 => null, + DbType.UInt64 => null, + + _ => throw new ArgumentOutOfRangeException(nameof(dbType), dbType, null) + }; + + public static DbType ToDbType(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + // Numeric types + NpgsqlDbType.Smallint => DbType.Int16, + NpgsqlDbType.Integer => DbType.Int32, + NpgsqlDbType.Bigint => DbType.Int64, + NpgsqlDbType.Real => DbType.Single, + NpgsqlDbType.Double => DbType.Double, + NpgsqlDbType.Numeric => DbType.Decimal, + NpgsqlDbType.Money => DbType.Currency, + + // Text types + NpgsqlDbType.Text => DbType.String, + NpgsqlDbType.Xml => DbType.Xml, + NpgsqlDbType.Varchar => DbType.String, + NpgsqlDbType.Char => DbType.String, + NpgsqlDbType.Name => DbType.String, + NpgsqlDbType.Citext => DbType.String, + + // Date/time types + NpgsqlDbType.Timestamp => LegacyTimestampBehavior ? DbType.DateTime : DbType.DateTime2, + NpgsqlDbType.TimestampTz => LegacyTimestampBehavior ? 
DbType.DateTimeOffset : DbType.DateTime, + NpgsqlDbType.Date => DbType.Date, + NpgsqlDbType.Time => DbType.Time, + + // Misc data types + NpgsqlDbType.Bytea => DbType.Binary, + NpgsqlDbType.Boolean => DbType.Boolean, + NpgsqlDbType.Uuid => DbType.Guid, + + _ => DbType.Object + }; + + /// Can return null when a custom range type is used. + internal static string? ToUnqualifiedDataTypeName(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + // Numeric types + NpgsqlDbType.Smallint => "int2", + NpgsqlDbType.Integer => "int4", + NpgsqlDbType.Bigint => "int8", + NpgsqlDbType.Real => "float4", + NpgsqlDbType.Double => "float8", + NpgsqlDbType.Numeric => "numeric", + NpgsqlDbType.Money => "money", + + // Text types + NpgsqlDbType.Text => "text", + NpgsqlDbType.Xml => "xml", + NpgsqlDbType.Varchar => "varchar", + NpgsqlDbType.Char => "bpchar", + NpgsqlDbType.Name => "name", + NpgsqlDbType.Refcursor => "refcursor", + NpgsqlDbType.Jsonb => "jsonb", + NpgsqlDbType.Json => "json", + NpgsqlDbType.JsonPath => "jsonpath", + + // Date/time types + NpgsqlDbType.Timestamp => "timestamp", + NpgsqlDbType.TimestampTz => "timestamptz", + NpgsqlDbType.Date => "date", + NpgsqlDbType.Time => "time", + NpgsqlDbType.TimeTz => "timetz", + NpgsqlDbType.Interval => "interval", + + // Network types + NpgsqlDbType.Cidr => "cidr", + NpgsqlDbType.Inet => "inet", + NpgsqlDbType.MacAddr => "macaddr", + NpgsqlDbType.MacAddr8 => "macaddr8", + + // Full-text search types + NpgsqlDbType.TsQuery => "tsquery", + NpgsqlDbType.TsVector => "tsvector", + + // Geometry types + NpgsqlDbType.Box => "box", + NpgsqlDbType.Circle => "circle", + NpgsqlDbType.Line => "line", + NpgsqlDbType.LSeg => "lseg", + NpgsqlDbType.Path => "path", + NpgsqlDbType.Point => "point", + NpgsqlDbType.Polygon => "polygon", + + + // UInt types + NpgsqlDbType.Oid => "oid", + NpgsqlDbType.Xid => "xid", + NpgsqlDbType.Xid8 => "xid8", + NpgsqlDbType.Cid => "cid", + NpgsqlDbType.Regtype => "regtype", + NpgsqlDbType.Regconfig => 
"regconfig", + + // Misc types + NpgsqlDbType.Boolean => "bool", + NpgsqlDbType.Bytea => "bytea", + NpgsqlDbType.Uuid => "uuid", + NpgsqlDbType.Varbit => "varbit", + NpgsqlDbType.Bit => "bit", + + // Built-in range types + NpgsqlDbType.IntegerRange => "int4range", + NpgsqlDbType.BigIntRange => "int8range", + NpgsqlDbType.NumericRange => "numrange", + NpgsqlDbType.TimestampRange => "tsrange", + NpgsqlDbType.TimestampTzRange => "tstzrange", + NpgsqlDbType.DateRange => "daterange", + + // Built-in multirange types + NpgsqlDbType.IntegerMultirange => "int4multirange", + NpgsqlDbType.BigIntMultirange => "int8multirange", + NpgsqlDbType.NumericMultirange => "nummultirange", + NpgsqlDbType.TimestampMultirange => "tsmultirange", + NpgsqlDbType.TimestampTzMultirange => "tstzmultirange", + NpgsqlDbType.DateMultirange => "datemultirange", + + // Internal types + NpgsqlDbType.Int2Vector => "int2vector", + NpgsqlDbType.Oidvector => "oidvector", + NpgsqlDbType.PgLsn => "pg_lsn", + NpgsqlDbType.Tid => "tid", + NpgsqlDbType.InternalChar => "char", + + // Plugin types + NpgsqlDbType.Citext => "citext", + NpgsqlDbType.Cube => "cube", + NpgsqlDbType.LQuery => "lquery", + NpgsqlDbType.LTree => "ltree", + NpgsqlDbType.LTxtQuery => "ltxtquery", + NpgsqlDbType.Hstore => "hstore", + NpgsqlDbType.Geometry => "geometry", + NpgsqlDbType.Geography => "geography", + + NpgsqlDbType.Unknown => "unknown", + + // Unknown cannot be composed + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Array) && (npgsqlDbType & ~NpgsqlDbType.Array) == NpgsqlDbType.Unknown + => "unknown", + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Range) && (npgsqlDbType & ~NpgsqlDbType.Range) == NpgsqlDbType.Unknown + => "unknown", + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) && (npgsqlDbType & ~NpgsqlDbType.Multirange) == NpgsqlDbType.Unknown + => "unknown", + + _ => npgsqlDbType.HasFlag(NpgsqlDbType.Array) + ? ToUnqualifiedDataTypeName(npgsqlDbType & ~NpgsqlDbType.Array) is { } name ? "_" + name : null + : null // e.g. 
ranges + }; + + internal static string ToUnqualifiedDataTypeNameOrThrow(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType.ToUnqualifiedDataTypeName() ?? throw new ArgumentOutOfRangeException(nameof(npgsqlDbType), npgsqlDbType, "Cannot convert NpgsqlDbType to DataTypeName"); + + /// Can return null when a plugin type or custom range type is used. + internal static DataTypeName? ToDataTypeName(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + // Numeric types + NpgsqlDbType.Smallint => DataTypeNames.Int2, + NpgsqlDbType.Integer => DataTypeNames.Int4, + NpgsqlDbType.Bigint => DataTypeNames.Int8, + NpgsqlDbType.Real => DataTypeNames.Float4, + NpgsqlDbType.Double => DataTypeNames.Float8, + NpgsqlDbType.Numeric => DataTypeNames.Numeric, + NpgsqlDbType.Money => DataTypeNames.Money, + + // Text types + NpgsqlDbType.Text => DataTypeNames.Text, + NpgsqlDbType.Xml => DataTypeNames.Xml, + NpgsqlDbType.Varchar => DataTypeNames.Varchar, + NpgsqlDbType.Char => DataTypeNames.Bpchar, + NpgsqlDbType.Name => DataTypeNames.Name, + NpgsqlDbType.Refcursor => DataTypeNames.RefCursor, + NpgsqlDbType.Jsonb => DataTypeNames.Jsonb, + NpgsqlDbType.Json => DataTypeNames.Json, + NpgsqlDbType.JsonPath => DataTypeNames.Jsonpath, + + // Date/time types + NpgsqlDbType.Timestamp => DataTypeNames.Timestamp, + NpgsqlDbType.TimestampTz => DataTypeNames.TimestampTz, + NpgsqlDbType.Date => DataTypeNames.Date, + NpgsqlDbType.Time => DataTypeNames.Time, + NpgsqlDbType.TimeTz => DataTypeNames.TimeTz, + NpgsqlDbType.Interval => DataTypeNames.Interval, + + // Network types + NpgsqlDbType.Cidr => DataTypeNames.Cidr, + NpgsqlDbType.Inet => DataTypeNames.Inet, + NpgsqlDbType.MacAddr => DataTypeNames.MacAddr, + NpgsqlDbType.MacAddr8 => DataTypeNames.MacAddr8, + + // Full-text search types + NpgsqlDbType.TsQuery => DataTypeNames.TsQuery, + NpgsqlDbType.TsVector => DataTypeNames.TsVector, + + // Geometry types + NpgsqlDbType.Box => DataTypeNames.Box, + NpgsqlDbType.Circle => DataTypeNames.Circle, + 
NpgsqlDbType.Line => DataTypeNames.Line, + NpgsqlDbType.LSeg => DataTypeNames.LSeg, + NpgsqlDbType.Path => DataTypeNames.Path, + NpgsqlDbType.Point => DataTypeNames.Point, + NpgsqlDbType.Polygon => DataTypeNames.Polygon, + + // UInt types + NpgsqlDbType.Oid => DataTypeNames.Oid, + NpgsqlDbType.Xid => DataTypeNames.Xid, + NpgsqlDbType.Xid8 => DataTypeNames.Xid8, + NpgsqlDbType.Cid => DataTypeNames.Cid, + NpgsqlDbType.Regtype => DataTypeNames.RegType, + NpgsqlDbType.Regconfig => DataTypeNames.RegConfig, + + // Misc types + NpgsqlDbType.Boolean => DataTypeNames.Bool, + NpgsqlDbType.Bytea => DataTypeNames.Bytea, + NpgsqlDbType.Uuid => DataTypeNames.Uuid, + NpgsqlDbType.Varbit => DataTypeNames.Varbit, + NpgsqlDbType.Bit => DataTypeNames.Bit, + + // Built-in range types + NpgsqlDbType.IntegerRange => DataTypeNames.Int4Range, + NpgsqlDbType.BigIntRange => DataTypeNames.Int8Range, + NpgsqlDbType.NumericRange => DataTypeNames.NumRange, + NpgsqlDbType.TimestampRange => DataTypeNames.TsRange, + NpgsqlDbType.TimestampTzRange => DataTypeNames.TsTzRange, + NpgsqlDbType.DateRange => DataTypeNames.DateRange, + + // Internal types + NpgsqlDbType.Int2Vector => DataTypeNames.Int2Vector, + NpgsqlDbType.Oidvector => DataTypeNames.OidVector, + NpgsqlDbType.PgLsn => DataTypeNames.PgLsn, + NpgsqlDbType.Tid => DataTypeNames.Tid, + NpgsqlDbType.InternalChar => DataTypeNames.Char, + + // Special types + NpgsqlDbType.Unknown => DataTypeNames.Unknown, + + // Unknown cannot be composed + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Array) && (npgsqlDbType & ~NpgsqlDbType.Array) == NpgsqlDbType.Unknown + => DataTypeNames.Unknown, + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Range) && (npgsqlDbType & ~NpgsqlDbType.Range) == NpgsqlDbType.Unknown + => DataTypeNames.Unknown, + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) && (npgsqlDbType & ~NpgsqlDbType.Multirange) == NpgsqlDbType.Unknown + => DataTypeNames.Unknown, + + // If both multirange and array are set we first remove array, so array is 
added to the outermost datatypename. + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Array) + => ToDataTypeName(npgsqlDbType & ~NpgsqlDbType.Array)?.ToArrayName(), + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) + => ToDataTypeName((npgsqlDbType | NpgsqlDbType.Range) & ~NpgsqlDbType.Multirange)?.ToDefaultMultirangeName(), + + // Plugin types don't have a stable fully qualified name. + _ => null + }; + + internal static NpgsqlDbType? ToNpgsqlDbType(this DataTypeName dataTypeName) => ToNpgsqlDbType(dataTypeName.UnqualifiedName); + /// Should not be used with display names, first normalize it instead. + internal static NpgsqlDbType? ToNpgsqlDbType(string normalizedDataTypeName) { - Name = name; - BaseOID = baseOID; - ArrayOID = arrayOID; + var unqualifiedName = normalizedDataTypeName.AsSpan(); + if (unqualifiedName.IndexOf('.') is not -1 and var index) + unqualifiedName = unqualifiedName.Slice(index + 1); + + return unqualifiedName switch + { + // Numeric types + "int2" => NpgsqlDbType.Smallint, + "int4" => NpgsqlDbType.Integer, + "int8" => NpgsqlDbType.Bigint, + "float4" => NpgsqlDbType.Real, + "float8" => NpgsqlDbType.Double, + "numeric" => NpgsqlDbType.Numeric, + "money" => NpgsqlDbType.Money, + + // Text types + "text" => NpgsqlDbType.Text, + "xml" => NpgsqlDbType.Xml, + "varchar" => NpgsqlDbType.Varchar, + "bpchar" => NpgsqlDbType.Char, + "name" => NpgsqlDbType.Name, + "refcursor" => NpgsqlDbType.Refcursor, + "jsonb" => NpgsqlDbType.Jsonb, + "json" => NpgsqlDbType.Json, + "jsonpath" => NpgsqlDbType.JsonPath, + + // Date/time types + "timestamp" => NpgsqlDbType.Timestamp, + "timestamptz" => NpgsqlDbType.TimestampTz, + "date" => NpgsqlDbType.Date, + "time" => NpgsqlDbType.Time, + "timetz" => NpgsqlDbType.TimeTz, + "interval" => NpgsqlDbType.Interval, + + // Network types + "cidr" => NpgsqlDbType.Cidr, + "inet" => NpgsqlDbType.Inet, + "macaddr" => NpgsqlDbType.MacAddr, + "macaddr8" => NpgsqlDbType.MacAddr8, + + // Full-text search types + "tsquery" => 
NpgsqlDbType.TsQuery, + "tsvector" => NpgsqlDbType.TsVector, + + // Geometry types + "box" => NpgsqlDbType.Box, + "circle" => NpgsqlDbType.Circle, + "line" => NpgsqlDbType.Line, + "lseg" => NpgsqlDbType.LSeg, + "path" => NpgsqlDbType.Path, + "point" => NpgsqlDbType.Point, + "polygon" => NpgsqlDbType.Polygon, + + // UInt types + "oid" => NpgsqlDbType.Oid, + "xid" => NpgsqlDbType.Xid, + "xid8" => NpgsqlDbType.Xid8, + "cid" => NpgsqlDbType.Cid, + "regtype" => NpgsqlDbType.Regtype, + "regconfig" => NpgsqlDbType.Regconfig, + + // Misc types + "bool" => NpgsqlDbType.Boolean, + "bytea" => NpgsqlDbType.Bytea, + "uuid" => NpgsqlDbType.Uuid, + "varbit" => NpgsqlDbType.Varbit, + "bit" => NpgsqlDbType.Bit, + + // Built-in range types + "int4range" => NpgsqlDbType.IntegerRange, + "int8range" => NpgsqlDbType.BigIntRange, + "numrange" => NpgsqlDbType.NumericRange, + "tsrange" => NpgsqlDbType.TimestampRange, + "tstzrange" => NpgsqlDbType.TimestampTzRange, + "daterange" => NpgsqlDbType.DateRange, + + // Built-in multirange types + "int4multirange" => NpgsqlDbType.IntegerMultirange, + "int8multirange" => NpgsqlDbType.BigIntMultirange, + "nummultirange" => NpgsqlDbType.NumericMultirange, + "tsmultirange" => NpgsqlDbType.TimestampMultirange, + "tstzmultirange" => NpgsqlDbType.TimestampTzMultirange, + "datemultirange" => NpgsqlDbType.DateMultirange, + + // Internal types + "int2vector" => NpgsqlDbType.Int2Vector, + "oidvector" => NpgsqlDbType.Oidvector, + "pg_lsn" => NpgsqlDbType.PgLsn, + "tid" => NpgsqlDbType.Tid, + "char" => NpgsqlDbType.InternalChar, + + // Plugin types + "citext" => NpgsqlDbType.Citext, + "cube" => NpgsqlDbType.Cube, + "lquery" => NpgsqlDbType.LQuery, + "ltree" => NpgsqlDbType.LTree, + "ltxtquery" => NpgsqlDbType.LTxtQuery, + "hstore" => NpgsqlDbType.Hstore, + "geometry" => NpgsqlDbType.Geometry, + "geography" => NpgsqlDbType.Geography, + + _ when unqualifiedName.IndexOf("unknown") != -1 + => !unqualifiedName.StartsWith("_", StringComparison.Ordinal) + ? 
NpgsqlDbType.Unknown + : null, + _ when unqualifiedName.StartsWith("_", StringComparison.Ordinal) + => ToNpgsqlDbType(unqualifiedName.Slice(1).ToString()) is { } elementNpgsqlDbType + ? elementNpgsqlDbType | NpgsqlDbType.Array + : null, + // e.g. custom ranges, plugin types etc. + _ => null + }; } - - internal BuiltInPostgresType( - string name, uint baseOID, uint arrayOID, string rangeName, uint rangeOID, string multirangeName, uint multirangeOID) - { - Name = name; - BaseOID = baseOID; - ArrayOID = arrayOID; - - RangeName = rangeName; - RangeOID = rangeOID; - MultirangeName = multirangeName; - MultirangeOID = multirangeOID; - } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs b/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs index f3c1d49139..221a82cb27 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs @@ -1,8 +1,4 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Text; -using Npgsql; +using System; // ReSharper disable once CheckNamespace namespace NpgsqlTypes; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs b/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs index 00ff4131e4..3520d8f734 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Globalization; // ReSharper disable once CheckNamespace @@ -150,13 +150,8 @@ public static bool TryParse(ReadOnlySpan s, out NpgsqlLogSequenceNumber re { if (s[i] != '/') continue; -#if NETSTANDARD2_0 - var firstPart = s.Slice(0, i).ToString(); - var secondPart = s.Slice(++i).ToString(); -#else var firstPart = s.Slice(0, i); var secondPart = s.Slice(++i); -#endif if (!uint.TryParse(firstPart, NumberStyles.AllowHexSpecifier, null, out var first)) { @@ -337,4 +332,4 @@ public static NpgsqlLogSequenceNumber Smaller(NpgsqlLogSequenceNumber value1, Np => 
double.IsNaN(nbytes) || double.IsInfinity(nbytes) ? throw new NotFiniteNumberException($"Cannot add {nbytes} to {nameof(NpgsqlLogSequenceNumber)}", nbytes) : new NpgsqlLogSequenceNumber(checked((ulong)(lsn._value + nbytes))); -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs index 96720522da..aa6ae2cf0d 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.ComponentModel; using System.Diagnostics.CodeAnalysis; using System.Globalization; @@ -80,7 +80,7 @@ namespace NpgsqlTypes; /// /// The used by to convert bounds into . /// - static readonly TypeConverter BoundConverter = TypeDescriptor.GetConverter(typeof(T)); + static TypeConverter? BoundConverter; /// /// True if implements ; otherwise, false. @@ -93,16 +93,14 @@ namespace NpgsqlTypes; public static readonly NpgsqlRange Empty = new(default, default, RangeFlags.Empty); /// - /// The lower bound of the range. Only valid when is false. + /// The lower bound of the range. Only valid when is false (i.e. the range is non-empty with a finite lower bound). /// - [MaybeNull, AllowNull] - public T LowerBound { get; } + public T? LowerBound { get; } /// - /// The upper bound of the range. Only valid when is false. + /// The upper bound of the range. Only valid when is false (i.e. the range is non-empty with a finite upper bound). /// - [MaybeNull, AllowNull] - public T UpperBound { get; } + public T? UpperBound { get; } /// /// The characteristics of the boundaries. @@ -122,11 +120,13 @@ namespace NpgsqlTypes; /// /// True if the lower bound is indefinite (i.e. infinite or unbounded); otherwise, false. /// + [MemberNotNullWhen(false, nameof(LowerBound))] public bool LowerBoundInfinite => (Flags & RangeFlags.LowerBoundInfinite) != 0; /// /// True if the upper bound is indefinite (i.e. infinite or unbounded); otherwise, false. 
/// + [MemberNotNullWhen(false, nameof(UpperBound))] public bool UpperBoundInfinite => (Flags & RangeFlags.UpperBoundInfinite) != 0; /// @@ -139,8 +139,8 @@ namespace NpgsqlTypes; /// /// The lower bound of the range. /// The upper bound of the range. - public NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound) - : this(lowerBound, true, false, upperBound, true, false) { } + public NpgsqlRange(T lowerBound, T upperBound) + : this(lowerBound, lowerBoundIsInclusive: true, lowerBoundInfinite: false, upperBound, upperBoundIsInclusive: true, upperBoundInfinite: false) { } /// /// Constructs an with definite bounds. @@ -149,10 +149,8 @@ public NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound) /// True if the lower bound is is part of the range (i.e. inclusive); otherwise, false. /// The upper bound of the range. /// True if the upper bound is part of the range (i.e. inclusive); otherwise, false. - public NpgsqlRange( - [AllowNull] T lowerBound, bool lowerBoundIsInclusive, - [AllowNull] T upperBound, bool upperBoundIsInclusive) - : this(lowerBound, lowerBoundIsInclusive, false, upperBound, upperBoundIsInclusive, false) { } + public NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, T upperBound, bool upperBoundIsInclusive) + : this(lowerBound, lowerBoundIsInclusive, lowerBoundInfinite: false, upperBound, upperBoundIsInclusive, upperBoundInfinite: false) { } /// /// Constructs an . @@ -163,9 +161,7 @@ public NpgsqlRange( /// The upper bound of the range. /// True if the upper bound is part of the range (i.e. inclusive); otherwise, false. /// True if the upper bound is indefinite (i.e. infinite or unbounded); otherwise, false. - public NpgsqlRange( - [AllowNull] T lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, - [AllowNull] T upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) + public NpgsqlRange(T? lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T? 
upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) : this( lowerBound, upperBound, @@ -181,7 +177,7 @@ public NpgsqlRange( /// The lower bound of the range. /// The upper bound of the range. /// The characteristics of the range boundaries. - internal NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound, RangeFlags flags) : this() + internal NpgsqlRange(T? lowerBound, T? upperBound, RangeFlags flags) : this() { // TODO: We need to check if the bounds are implicitly empty. E.g. '(1,1)' or '(0,0]'. // See: https://github.com/npgsql/npgsql/issues/1943. @@ -207,7 +203,7 @@ internal NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound, RangeFl /// /// True if the range is implicitly empty; otherwise, false. /// - static bool IsEmptyRange([AllowNull] T lowerBound, [AllowNull] T upperBound, RangeFlags flags) + static bool IsEmptyRange(T? lowerBound, T? upperBound, RangeFlags flags) { // --------------------------------------------------------------------------------- // We only want to check for those conditions that are unambiguously erroneous: @@ -234,7 +230,7 @@ static bool IsEmptyRange([AllowNull] T lowerBound, [AllowNull] T upperBound, Ran return false; if (!HasEquatableBounds) - return lowerBound?.Equals(upperBound) ?? false; + return lowerBound.Equals(upperBound); var lower = (IEquatable)lowerBound; var upper = (IEquatable)upperBound; @@ -375,10 +371,10 @@ public override string ToString() /// /// See: https://www.postgresql.org/docs/current/static/rangetypes.html /// + [RequiresUnreferencedCode("Parse implementations for certain types of T may require members that have been trimmed.")] public static NpgsqlRange Parse(string value) { - if (value is null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); value = value.Trim(); @@ -394,8 +390,8 @@ public static NpgsqlRange Parse(string value) if (!lowerInclusive && !lowerExclusive) throw new FormatException("Malformed range literal. 
Missing left parenthesis or bracket."); - var upperInclusive = value[value.Length - 1] == UpperInclusiveBound; - var upperExclusive = value[value.Length - 1] == UpperExclusiveBound; + var upperInclusive = value[^1] == UpperInclusiveBound; + var upperExclusive = value[^1] == UpperExclusiveBound; if (!upperInclusive && !upperExclusive) throw new FormatException("Malformed range literal. Missing right parenthesis or bracket."); @@ -429,6 +425,7 @@ public static NpgsqlRange Parse(string value) string.Equals(upperSegment, NullLiteral, StringComparison.OrdinalIgnoreCase) || string.Equals(upperSegment, UpperInfinityLiteral, StringComparison.OrdinalIgnoreCase); + BoundConverter ??= TypeDescriptor.GetConverter(typeof(T)); var lower = lowerInfinite ? default : (T?)BoundConverter.ConvertFromString(lowerSegment); var upper = upperInfinite ? default : (T?)BoundConverter.ConvertFromString(upperSegment); @@ -438,6 +435,7 @@ public static NpgsqlRange Parse(string value) /// /// Represents a type converter for . /// + [RequiresUnreferencedCode("ConvertFrom implementations for certain types of T may require members that have been trimmed.")] public class RangeTypeConverter : TypeConverter { /// @@ -524,4 +522,4 @@ enum RangeFlags : byte /// The upper bound is both inclusive and indefinite. This represents an error condition. 
/// UpperInclusiveInfinite = UpperBoundInclusive | UpperBoundInfinite -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index ddc8b712ec..3e9b5995e1 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -1,8 +1,7 @@ -using System; +using System; using System.Collections.Generic; using System.Text; -#pragma warning disable CA1034 // ReSharper disable once CheckNamespace namespace NpgsqlTypes; @@ -77,10 +76,10 @@ public override string ToString() /// /// /// + [Obsolete("Client-side parsing of NpgsqlTsQuery is unreliable and cannot fully duplicate the PostgreSQL logic. Use PG functions instead (e.g. to_tsquery)")] public static NpgsqlTsQuery Parse(string value) { - if (value == null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); var valStack = new Stack(); var opStack = new Stack(); @@ -89,7 +88,7 @@ public static NpgsqlTsQuery Parse(string value) var pos = 0; var expectingBinOp = false; - var lastFollowedByOpDistance = -1; + short lastFollowedByOpDistance = -1; NextToken: if (pos >= value.Length) @@ -125,7 +124,7 @@ public static NpgsqlTsQuery Parse(string value) { lastFollowedByOpDistance = 1; } - else if (!int.TryParse(followedByOpDistanceString, out lastFollowedByOpDistance) + else if (!short.TryParse(followedByOpDistanceString, out lastFollowedByOpDistance) || lastFollowedByOpDistance < 0) { throw new FormatException("Syntax error in tsquery. 
Malformed distance in 'followed by' operator."); @@ -172,7 +171,7 @@ public static NpgsqlTsQuery Parse(string value) var tsOp = opStack.Pop(); valStack.Push((char)tsOp switch { - '&' => (NpgsqlTsQuery)new NpgsqlTsQueryAnd(left, right), + '&' => new NpgsqlTsQueryAnd(left, right), '|' => new NpgsqlTsQueryOr(left, right), '<' => new NpgsqlTsQueryFollowedBy(left, tsOp.FollowedByDistance, right), _ => throw new FormatException("Syntax error in tsquery") @@ -253,18 +252,27 @@ public static NpgsqlTsQuery Parse(string value) if (pos >= value.Length) goto Finish; ch = value[pos]; - if (ch == '*') + switch (ch) + { + case '*': ((NpgsqlTsQueryLexeme)valStack.Peek()).IsPrefixSearch = true; - else if (ch == 'a' || ch == 'A') + break; + case 'a' or 'A': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.A; - else if (ch == 'b' || ch == 'B') + break; + case 'b' or 'B': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.B; - else if (ch == 'c' || ch == 'C') + break; + case 'c' or 'C': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.C; - else if (ch == 'd' || ch == 'D') + break; + case 'd' or 'D': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.D; - else + break; + default: goto PushedVal; + } + pos++; goto InWeightInfo; @@ -338,12 +346,12 @@ public static NpgsqlTsQuery Parse(string value) } /// - public override int GetHashCode() => - throw new NotImplementedException(); + public override int GetHashCode() + => throw new NotSupportedException("Must be overridden"); /// - public override bool Equals(object? obj) => - obj is NpgsqlTsQuery query && query.Equals(this); + public override bool Equals(object? obj) + => obj is NpgsqlTsQuery query && query.Equals(this); /// /// Returns a value indicating whether this instance and a specified object represent the same value. @@ -358,9 +366,8 @@ public override bool Equals(object? obj) => /// The first object to compare. 
/// The second object to compare. /// if and are equal; otherwise, . - public static bool operator ==(NpgsqlTsQuery? left, NpgsqlTsQuery? right) => - left is null ? right is null : left.Equals(right); - + public static bool operator ==(NpgsqlTsQuery? left, NpgsqlTsQuery? right) + => left is null ? right is null : left.Equals(right); /// /// Indicates whether the values of two specified objects are not equal. @@ -368,20 +375,14 @@ public override bool Equals(object? obj) => /// The first object to compare. /// The second object to compare. /// if and are not equal; otherwise, . - public static bool operator !=(NpgsqlTsQuery? left, NpgsqlTsQuery? right) => - left is null ? right is not null : !left.Equals(right); + public static bool operator !=(NpgsqlTsQuery? left, NpgsqlTsQuery? right) + => left is null ? right is not null : !left.Equals(right); } -readonly struct NpgsqlTsQueryOperator +readonly struct NpgsqlTsQueryOperator(char character, short followedByDistance) { - public readonly char Char; - public readonly int FollowedByDistance; - - public NpgsqlTsQueryOperator(char character, int followedByDistance) - { - Char = character; - FollowedByDistance = followedByDistance; - } + public readonly char Char = character; + public readonly short FollowedByDistance = followedByDistance; public static implicit operator NpgsqlTsQueryOperator(char c) => new(c, 0); public static implicit operator char(NpgsqlTsQueryOperator o) => o.Char; @@ -402,8 +403,7 @@ public string Text get => _text; set { - if (string.IsNullOrEmpty(value)) - throw new ArgumentException("Text is null or empty string", nameof(value)); + ArgumentException.ThrowIfNullOrEmpty(value); _text = value; } @@ -461,10 +461,8 @@ public NpgsqlTsQueryLexeme(string text, Weight weights, bool isPrefixSearch) /// /// Weight enum, can be OR'ed together. 
/// -#pragma warning disable CA1714 [Flags] public enum Weight -#pragma warning restore CA1714 { /// /// None @@ -506,15 +504,15 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryLexeme lexeme && - lexeme.Text == Text && - lexeme.Weights == Weights && - lexeme.IsPrefixSearch == IsPrefixSearch; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryLexeme lexeme && + lexeme.Text == Text && + lexeme.Weights == Weights && + lexeme.IsPrefixSearch == IsPrefixSearch; /// - public override int GetHashCode() => - HashCode.Combine(Text, Weights, IsPrefixSearch); + public override int GetHashCode() + => HashCode.Combine(Text, Weights, IsPrefixSearch); } /// @@ -533,9 +531,7 @@ public sealed class NpgsqlTsQueryNot : NpgsqlTsQuery /// public NpgsqlTsQueryNot(NpgsqlTsQuery child) : base(NodeKind.Not) - { - Child = child; - } + => Child = child; internal override void WriteCore(StringBuilder sb, bool first = false) { @@ -555,13 +551,12 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryNot not && - not.Child == Child; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryNot not && not.Child == Child; /// - public override int GetHashCode() => - Child?.GetHashCode() ?? 0; + public override int GetHashCode() + => Child?.GetHashCode() ?? 0; } /// @@ -611,14 +606,12 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryAnd and && - and.Left == Left && - and.Right == Right; + public override bool Equals(NpgsqlTsQuery? 
other) + => other is NpgsqlTsQueryAnd and && and.Left == Left && and.Right == Right; /// - public override int GetHashCode() => - HashCode.Combine(Left, Right); + public override int GetHashCode() + => HashCode.Combine(Left, Right); } /// @@ -649,14 +642,12 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryOr or && - or.Left == Left && - or.Right == Right; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryOr or && or.Left == Left && or.Right == Right; /// - public override int GetHashCode() => - HashCode.Combine(Left, Right); + public override int GetHashCode() + => HashCode.Combine(Left, Right); } /// @@ -667,7 +658,7 @@ public sealed class NpgsqlTsQueryFollowedBy : NpgsqlTsQueryBinOp /// /// The distance between the 2 nodes, in lexemes. /// - public int Distance { get; set; } + public short Distance { get; set; } /// /// Creates a "followed by" operator, specifying 2 child nodes and the @@ -678,12 +669,11 @@ public sealed class NpgsqlTsQueryFollowedBy : NpgsqlTsQueryBinOp /// public NpgsqlTsQueryFollowedBy( NpgsqlTsQuery left, - int distance, + short distance, NpgsqlTsQuery right) : base(NodeKind.Phrase, left, right) { - if (distance < 0) - throw new ArgumentOutOfRangeException(nameof(distance)); + ArgumentOutOfRangeException.ThrowIfNegative(distance); Distance = distance; } @@ -708,19 +698,19 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryFollowedBy followedBy && - followedBy.Left == Left && - followedBy.Right == Right && - followedBy.Distance == Distance; + public override bool Equals(NpgsqlTsQuery? 
other) + => other is NpgsqlTsQueryFollowedBy followedBy && + followedBy.Left == Left && + followedBy.Right == Right && + followedBy.Distance == Distance; /// - public override int GetHashCode() => - HashCode.Combine(Left, Right, Distance); + public override int GetHashCode() + => HashCode.Combine(Left, Right, Distance); } /// -/// Represents an empty tsquery. Shold only be used as top node. +/// Represents an empty tsquery. Should only be used as top node. /// public sealed class NpgsqlTsQueryEmpty : NpgsqlTsQuery { @@ -732,10 +722,10 @@ public NpgsqlTsQueryEmpty() : base(NodeKind.Empty) {} internal override void WriteCore(StringBuilder sb, bool first = false) { } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryEmpty; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryEmpty; /// - public override int GetHashCode() => - Kind.GetHashCode(); -} \ No newline at end of file + public override int GetHashCode() + => Kind.GetHashCode(); +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 0cb4f5e371..b534c05755 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -1,9 +1,8 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; using System.Text; -#pragma warning disable CA1040, CA1034 // ReSharper disable once CheckNamespace namespace NpgsqlTypes; @@ -12,6 +11,11 @@ namespace NpgsqlTypes; /// public sealed class NpgsqlTsVector : IEnumerable, IEquatable { + /// + /// Represents an empty tsvector. 
+ /// + public static readonly NpgsqlTsVector Empty = new NpgsqlTsVector([], noCheck: true); + readonly List _lexemes; internal NpgsqlTsVector(List lexemes, bool noCheck = false) @@ -22,7 +26,7 @@ internal NpgsqlTsVector(List lexemes, bool noCheck = false) return; } - _lexemes = new List(lexemes); + _lexemes = [..lexemes]; if (_lexemes.Count == 0) return; @@ -74,10 +78,10 @@ internal NpgsqlTsVector(List lexemes, bool noCheck = false) /// /// /// + [Obsolete("Client-side parsing of NpgsqlTsVector is unreliable and cannot fully duplicate the PostgreSQL logic. Use PG functions instead (e.g. to_tsvector)")] public static NpgsqlTsVector Parse(string value) { - if (value == null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); var lexemes = new List(); var pos = 0; @@ -167,7 +171,7 @@ public static NpgsqlTsVector Parse(string value) goto WaitWord; StartPosInfo: - wordEntryPositions = new List(); + wordEntryPositions = []; InPosInfo: var digitPos = pos; @@ -189,7 +193,7 @@ public static NpgsqlTsVector Parse(string value) if (value[pos] >= 'B' && value[pos] <= 'D' || value[pos] >= 'b' && value[pos] <= 'd') { var weight = value[pos]; - if (weight >= 'b' && weight <= 'd') + if (weight is >= 'b' and <= 'd') weight = (char)(weight - ('b' - 'B')); wordEntryPositions.Add(new Lexeme.WordEntryPos(wordPos, Lexeme.Weight.D + ('D' - weight))); pos++; @@ -321,7 +325,7 @@ internal Lexeme(string text, List? wordEntryPositions, bool noCopy { Text = text; if (wordEntryPositions != null) - WordEntryPositions = noCopy ? wordEntryPositions : new List(wordEntryPositions); + WordEntryPositions = noCopy ? wordEntryPositions : [..wordEntryPositions]; else WordEntryPositions = null; } @@ -343,7 +347,7 @@ internal Lexeme(string text, List? 
wordEntryPositions, bool noCopy return list; // Don't change the original list, as the user might inspect it later if he holds a reference to the lexeme's list - list = new List(list); + list = [..list]; list.Sort((x, y) => x.Pos.CompareTo(y.Pos)); @@ -414,9 +418,7 @@ public struct WordEntryPos : IEquatable internal short Value { get; } internal WordEntryPos(short value) - { - Value = value; - } + => Value = value; /// /// Creates a WordEntryPos with a given position and weight. @@ -551,4 +553,4 @@ public bool Equals(Lexeme o) /// public static bool operator !=(Lexeme left, Lexeme right) => !left.Equals(right); } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 4a6c4d112b..4f63a9defb 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -1,13 +1,10 @@ using System; using System.Collections; using System.Collections.Generic; -using System.Diagnostics; using System.Globalization; using System.Net; using System.Net.Sockets; using System.Text; -using System.Text.RegularExpressions; -using Npgsql.Util; #pragma warning disable 1591 @@ -20,19 +17,10 @@ namespace NpgsqlTypes; /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -public struct NpgsqlPoint : IEquatable +public struct NpgsqlPoint(double x, double y) : IEquatable { - static readonly Regex Regex = new(@"\((-?\d+.?\d*),(-?\d+.?\d*)\)"); - - public double X { get; set; } - public double Y { get; set; } - - public NpgsqlPoint(double x, double y) - : this() - { - X = x; - Y = y; - } + public double X { get; set; } = x; + public double Y { get; set; } = y; // ReSharper disable CompareOfFloatsByEqualityOperator public bool Equals(NpgsqlPoint other) => X == other.X && Y == other.Y; @@ -46,20 +34,12 @@ public override bool Equals(object? 
obj) public static bool operator !=(NpgsqlPoint x, NpgsqlPoint y) => !(x == y); public override int GetHashCode() - => X.GetHashCode() ^ PGUtil.RotateShift(Y.GetHashCode(), PGUtil.BitsInInt / 2); - - public static NpgsqlPoint Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) { - throw new FormatException("Not a valid point: " + s); - } - return new NpgsqlPoint(double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat)); - } + => HashCode.Combine(X, Y); public override string ToString() => string.Format(CultureInfo.InvariantCulture, "({0},{1})", X, Y); + + public void Deconstruct(out double x, out double y) => (x, y) = (X, Y); } /// @@ -68,46 +48,28 @@ public override string ToString() /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -public struct NpgsqlLine : IEquatable +public struct NpgsqlLine(double a, double b, double c) : IEquatable { - static readonly Regex Regex = new(@"\{(-?\d+.?\d*),(-?\d+.?\d*),(-?\d+.?\d*)\}"); - - public double A { get; set; } - public double B { get; set; } - public double C { get; set; } - - public NpgsqlLine(double a, double b, double c) - : this() - { - A = a; - B = b; - C = c; - } - - public static NpgsqlLine Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) - throw new FormatException("Not a valid line: " + s); - return new NpgsqlLine( - double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat) - ); - } + public double A { get; set; } = a; + public double B { get; set; } = b; + public double C { get; set; } = c; public override string ToString() => 
string.Format(CultureInfo.InvariantCulture, "{{{0},{1},{2}}}", A, B, C); - public override int GetHashCode() => A.GetHashCode() * B.GetHashCode() * C.GetHashCode(); + public override int GetHashCode() + => HashCode.Combine(A, B, C); - public bool Equals(NpgsqlLine other) => A == other.A && B == other.B && C == other.C; + public bool Equals(NpgsqlLine other) + => A == other.A && B == other.B && C == other.C; public override bool Equals(object? obj) => obj is NpgsqlLine line && Equals(line); public static bool operator ==(NpgsqlLine x, NpgsqlLine y) => x.Equals(y); public static bool operator !=(NpgsqlLine x, NpgsqlLine y) => !(x == y); + + public void Deconstruct(out double a, out double b, out double c) => (a, b, c) = (A, B, C); } /// @@ -115,8 +77,6 @@ public override bool Equals(object? obj) /// public struct NpgsqlLSeg : IEquatable { - static readonly Regex Regex = new(@"\[\((-?\d+.?\d*),(-?\d+.?\d*)\),\((-?\d+.?\d*),(-?\d+.?\d*)\)\]"); - public NpgsqlPoint Start { get; set; } public NpgsqlPoint End { get; set; } @@ -130,40 +90,25 @@ public NpgsqlLSeg(NpgsqlPoint start, NpgsqlPoint end) public NpgsqlLSeg(double startx, double starty, double endx, double endy) : this() { Start = new NpgsqlPoint(startx, starty); - End = new NpgsqlPoint(endx, endy); - } - - public static NpgsqlLSeg Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) { - throw new FormatException("Not a valid line: " + s); - } - return new NpgsqlLSeg( - double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[4].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat) - ); - + End = new NpgsqlPoint(endx, endy); } public override string ToString() => string.Format(CultureInfo.InvariantCulture, "[{0},{1}]", 
Start, End); public override int GetHashCode() - => Start.X.GetHashCode() ^ - PGUtil.RotateShift(Start.Y.GetHashCode(), PGUtil.BitsInInt / 4) ^ - PGUtil.RotateShift(End.X.GetHashCode(), PGUtil.BitsInInt / 2) ^ - PGUtil.RotateShift(End.Y.GetHashCode(), PGUtil.BitsInInt * 3 / 4); + => HashCode.Combine(Start.X, Start.Y, End.X, End.Y); - public bool Equals(NpgsqlLSeg other) => Start == other.Start && End == other.End; + public bool Equals(NpgsqlLSeg other) + => Start == other.Start && End == other.End; public override bool Equals(object? obj) => obj is NpgsqlLSeg seg && Equals(seg); public static bool operator ==(NpgsqlLSeg x, NpgsqlLSeg y) => x.Equals(y); public static bool operator !=(NpgsqlLSeg x, NpgsqlLSeg y) => !(x == y); + + public void Deconstruct(out NpgsqlPoint start, out NpgsqlPoint end) => (start, end) = (Start, End); } /// @@ -174,15 +119,33 @@ public override bool Equals(object? obj) /// public struct NpgsqlBox : IEquatable { - static readonly Regex Regex = new(@"\((-?\d+.?\d*),(-?\d+.?\d*)\),\((-?\d+.?\d*),(-?\d+.?\d*)\)"); + NpgsqlPoint _upperRight; + public NpgsqlPoint UpperRight + { + get => _upperRight; + set + { + _upperRight = value; + NormalizeBox(); + } + } - public NpgsqlPoint UpperRight { get; set; } - public NpgsqlPoint LowerLeft { get; set; } + NpgsqlPoint _lowerLeft; + public NpgsqlPoint LowerLeft + { + get => _lowerLeft; + set + { + _lowerLeft = value; + NormalizeBox(); + } + } public NpgsqlBox(NpgsqlPoint upperRight, NpgsqlPoint lowerLeft) : this() { - UpperRight = upperRight; - LowerLeft = lowerLeft; + _upperRight = upperRight; + _lowerLeft = lowerLeft; + NormalizeBox(); } public NpgsqlBox(double top, double right, double bottom, double left) @@ -197,7 +160,8 @@ public NpgsqlBox(double top, double right, double bottom, double left) public bool IsEmpty => Width == 0 || Height == 0; - public bool Equals(NpgsqlBox other) => UpperRight == other.UpperRight && LowerLeft == other.LowerLeft; + public bool Equals(NpgsqlBox other) + => UpperRight 
== other.UpperRight && LowerLeft == other.LowerLeft; public override bool Equals(object? obj) => obj is NpgsqlBox box && Equals(box); @@ -207,22 +171,43 @@ public override bool Equals(object? obj) public override string ToString() => string.Format(CultureInfo.InvariantCulture, "{0},{1}", UpperRight, LowerLeft); - public static NpgsqlBox Parse(string s) + public override int GetHashCode() + => HashCode.Combine(Top, Right, Bottom, LowerLeft); + + // Swaps corners for isomorphic boxes, to mirror postgres behavior. + // See: https://github.com/postgres/postgres/blob/af2324fabf0020e464b0268be9ef03e8f46ed84b/src/backend/utils/adt/geo_ops.c#L435-L447 + void NormalizeBox() { - var m = Regex.Match(s); - return new NpgsqlBox( - new NpgsqlPoint(double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat)), - new NpgsqlPoint(double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[4].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat)) - ); + if (_upperRight.X < _lowerLeft.X) + (_upperRight.X, _lowerLeft.X) = (_lowerLeft.X, _upperRight.X); + + if (_upperRight.Y < _lowerLeft.Y) + (_upperRight.Y, _lowerLeft.Y) = (_lowerLeft.Y, _upperRight.Y); } - public override int GetHashCode() - => Top.GetHashCode() ^ - PGUtil.RotateShift(Right.GetHashCode(), PGUtil.BitsInInt / 4) ^ - PGUtil.RotateShift(Bottom.GetHashCode(), PGUtil.BitsInInt / 2) ^ - PGUtil.RotateShift(LowerLeft.GetHashCode(), PGUtil.BitsInInt * 3 / 4); + public void Deconstruct(out NpgsqlPoint lowerLeft, out NpgsqlPoint upperRight) + { + lowerLeft = LowerLeft; + upperRight = UpperRight; + } + + public void Deconstruct(out double left, out double right, out double bottom, out double top) + { + left = Left; + right = Right; + bottom = Bottom; + top = Top; + } + + public void Deconstruct(out double left, 
out double right, out double bottom, out double top, out double width, out double height) + { + left = Left; + right = Right; + bottom = Bottom; + top = Top; + width = Width; + height = Height; + } } /// @@ -230,12 +215,18 @@ public override int GetHashCode() /// public struct NpgsqlPath : IList, IEquatable { - readonly List _points; + List _points; + + List Points => _points ??= []; + public bool Open { get; set; } - public NpgsqlPath(IEnumerable points, bool open) : this() + public NpgsqlPath() + => _points = []; + + public NpgsqlPath(IEnumerable points, bool open) { - _points = new List(points); + _points = [..points]; Open = open; } @@ -244,7 +235,7 @@ public NpgsqlPath(params NpgsqlPoint[] points) : this(points, false) {} public NpgsqlPath(bool open) : this() { - _points = new List(); + _points = []; Open = open; } @@ -258,23 +249,23 @@ public NpgsqlPath(int capacity) : this(capacity, false) {} public NpgsqlPoint this[int index] { - get => _points[index]; - set => _points[index] = value; + get => Points[index]; + set => Points[index] = value; } - public int Capacity => _points.Capacity; - public int Count => _points.Count; + public int Capacity => Points.Capacity; + public int Count => _points?.Count ?? 
0; public bool IsReadOnly => false; - public int IndexOf(NpgsqlPoint item) => _points.IndexOf(item); - public void Insert(int index, NpgsqlPoint item) => _points.Insert(index, item); - public void RemoveAt(int index) => _points.RemoveAt(index); - public void Add(NpgsqlPoint item) => _points.Add(item); - public void Clear() => _points.Clear(); - public bool Contains(NpgsqlPoint item) => _points.Contains(item); - public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => _points.CopyTo(array, arrayIndex); - public bool Remove(NpgsqlPoint item) => _points.Remove(item); - public IEnumerator GetEnumerator() => _points.GetEnumerator(); + public int IndexOf(NpgsqlPoint item) => Points.IndexOf(item); + public void Insert(int index, NpgsqlPoint item) => Points.Insert(index, item); + public void RemoveAt(int index) => Points.RemoveAt(index); + public void Add(NpgsqlPoint item) => Points.Add(item); + public void Clear() => Points.Clear(); + public bool Contains(NpgsqlPoint item) => Points.Contains(item); + public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => Points.CopyTo(array, arrayIndex); + public bool Remove(NpgsqlPoint item) => Points.Remove(item); + public IEnumerator GetEnumerator() => Points.GetEnumerator(); IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); public bool Equals(NpgsqlPath other) @@ -297,15 +288,16 @@ public override bool Equals(object? obj) public override int GetHashCode() { - var ret = 266370105;//seed with something other than zero to make paths of all zeros hash differently. + var hashCode = new HashCode(); + hashCode.Add(Open); + foreach (var point in this) { - //The ideal amount to shift each value is one that would evenly spread it throughout - //the resultant bytes. Using the current result % 32 is essentially using a random value - //but one that will be the same on subsequent calls. - ret ^= PGUtil.RotateShift(point.GetHashCode(), ret % PGUtil.BitsInInt); + hashCode.Add(point.X); + hashCode.Add(point.Y); } - return Open ? 
ret : -ret; + + return hashCode.ToHashCode(); } public override string ToString() @@ -313,38 +305,16 @@ public override string ToString() var sb = new StringBuilder(); sb.Append(Open ? '[' : '('); int i; - for (i = 0; i < _points.Count; i++) + for (i = 0; i < Count; i++) { var p = _points[i]; sb.AppendFormat(CultureInfo.InvariantCulture, "({0},{1})", p.X, p.Y); if (i < _points.Count - 1) - sb.Append(","); + sb.Append(','); } sb.Append(Open ? ']' : ')'); return sb.ToString(); } - - public static NpgsqlPath Parse(string s) - { - var open = s[0] switch - { - '[' => true, - '(' => false, - _ => throw new Exception("Invalid path string: " + s) - }; - Debug.Assert(s[s.Length - 1] == (open ? ']' : ')')); - var result = new NpgsqlPath(open); - var i = 1; - while (true) - { - var i2 = s.IndexOf(')', i); - result.Add(NpgsqlPoint.Parse(s.Substring(i, i2 - i + 1))); - if (s[i2 + 1] != ',') - break; - i = i2 + 2; - } - return result; - } } /// @@ -352,39 +322,40 @@ public static NpgsqlPath Parse(string s) /// public struct NpgsqlPolygon : IList, IEquatable { - readonly List _points; + List _points; + + List Points => _points ??= []; + + public NpgsqlPolygon() + => _points = []; public NpgsqlPolygon(IEnumerable points) - { - _points = new List(points); - } + => _points = [..points]; - public NpgsqlPolygon(params NpgsqlPoint[] points) : this ((IEnumerable) points) {} + public NpgsqlPolygon(params NpgsqlPoint[] points) : this((IEnumerable) points) {} public NpgsqlPolygon(int capacity) - { - _points = new List(capacity); - } + => _points = new List(capacity); public NpgsqlPoint this[int index] { - get => _points[index]; - set => _points[index] = value; + get => Points[index]; + set => Points[index] = value; } - public int Capacity => _points.Capacity; - public int Count => _points.Count; + public int Capacity => Points.Capacity; + public int Count => _points?.Count ?? 
0; public bool IsReadOnly => false; - public int IndexOf(NpgsqlPoint item) => _points.IndexOf(item); - public void Insert(int index, NpgsqlPoint item) => _points.Insert(index, item); - public void RemoveAt(int index) => _points.RemoveAt(index); - public void Add(NpgsqlPoint item) => _points.Add(item); - public void Clear() => _points.Clear(); - public bool Contains(NpgsqlPoint item) => _points.Contains(item); - public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => _points.CopyTo(array, arrayIndex); - public bool Remove(NpgsqlPoint item) => _points.Remove(item); - public IEnumerator GetEnumerator() => _points.GetEnumerator(); + public int IndexOf(NpgsqlPoint item) => Points.IndexOf(item); + public void Insert(int index, NpgsqlPoint item) => Points.Insert(index, item); + public void RemoveAt(int index) => Points.RemoveAt(index); + public void Add(NpgsqlPoint item) => Points.Add(item); + public void Clear() => Points.Clear(); + public bool Contains(NpgsqlPoint item) => Points.Contains(item); + public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => Points.CopyTo(array, arrayIndex); + public bool Remove(NpgsqlPoint item) => Points.Remove(item); + public IEnumerator GetEnumerator() => Points.GetEnumerator(); IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); public bool Equals(NpgsqlPolygon other) @@ -407,30 +378,15 @@ public override bool Equals(object? obj) public override int GetHashCode() { - var ret = 266370105;//seed with something other than zero to make paths of all zeros hash differently. + var hashCode = new HashCode(); + foreach (var point in this) { - //The ideal amount to shift each value is one that would evenly spread it throughout - //the resultant bytes. Using the current result % 32 is essentially using a random value - //but one that will be the same on subsequent calls. 
- ret ^= PGUtil.RotateShift(point.GetHashCode(), ret % PGUtil.BitsInInt); + hashCode.Add(point.X); + hashCode.Add(point.Y); } - return ret; - } - public static NpgsqlPolygon Parse(string s) - { - var points = new List(); - var i = 1; - while (true) - { - var i2 = s.IndexOf(')', i); - points.Add(NpgsqlPoint.Parse(s.Substring(i, i2 - i + 1))); - if (s[i2 + 1] != ',') - break; - i = i2 + 2; - } - return new NpgsqlPolygon(points); + return hashCode.ToHashCode(); } public override string ToString() @@ -438,7 +394,7 @@ public override string ToString() var sb = new StringBuilder(); sb.Append('('); int i; - for (i = 0; i < _points.Count; i++) + for (i = 0; i < Count; i++) { var p = _points[i]; sb.AppendFormat(CultureInfo.InvariantCulture, "({0},{1})", p.X, p.Y); @@ -454,37 +410,21 @@ public override string ToString() /// /// Represents a PostgreSQL Circle type. /// -public struct NpgsqlCircle : IEquatable +public struct NpgsqlCircle(double x, double y, double radius) : IEquatable { - static readonly Regex Regex = new(@"<\((-?\d+.?\d*),(-?\d+.?\d*)\),(\d+.?\d*)>"); - - public double X { get; set; } - public double Y { get; set; } - public double Radius { get; set; } + public double X { get; set; } = x; + public double Y { get; set; } = y; + public double Radius { get; set; } = radius; public NpgsqlCircle(NpgsqlPoint center, double radius) - : this() + : this(center.X, center.Y, radius) { - X = center.X; - Y = center.Y; - Radius = radius; - } - - public NpgsqlCircle(double x, double y, double radius) : this() - { - X = x; - Y = y; - Radius = radius; } public NpgsqlPoint Center { get => new(X, Y); - set - { - X = value.X; - Y = value.Y; - } + set => (X, Y) = (value.X, value.Y); } // ReSharper disable CompareOfFloatsByEqualityOperator @@ -495,19 +435,6 @@ public bool Equals(NpgsqlCircle other) public override bool Equals(object? 
obj) => obj is NpgsqlCircle circle && Equals(circle); - public static NpgsqlCircle Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) - throw new FormatException("Not a valid circle: " + s); - - return new NpgsqlCircle( - double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat) - ); - } - public override string ToString() => string.Format(CultureInfo.InvariantCulture, "<({0},{1}),{2}>", X, Y, Radius); @@ -515,100 +442,139 @@ public override string ToString() public static bool operator !=(NpgsqlCircle x, NpgsqlCircle y) => !(x == y); public override int GetHashCode() - => X.GetHashCode() * Y.GetHashCode() * Radius.GetHashCode(); + => HashCode.Combine(X, Y, Radius); + + public void Deconstruct(out double x, out double y, out double radius) + { + x = X; + y = Y; + radius = Radius; + } + + public void Deconstruct(out NpgsqlPoint center, out double radius) + { + center = Center; + radius = Radius; + } } /// -/// Represents a PostgreSQL inet type, which is a combination of an IPAddress and a -/// subnet mask. +/// Represents a PostgreSQL inet type, which is a combination of an IPAddress and a subnet mask. 
/// /// /// https://www.postgresql.org/docs/current/static/datatype-net-types.html /// -[Obsolete("Use ValueTuple instead")] -public struct NpgsqlInet : IEquatable +public readonly record struct NpgsqlInet { - public IPAddress Address { get; set; } - public int Netmask { get; set; } + public IPAddress Address { get; } + public byte Netmask { get; } - public NpgsqlInet(IPAddress address, int netmask) + public NpgsqlInet(IPAddress address, byte netmask) { - if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) - throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); - + CheckAddressFamily(address); Address = address; Netmask = netmask; } public NpgsqlInet(IPAddress address) + : this(address, (byte)(address.AddressFamily == AddressFamily.InterNetwork ? 32 : 128)) { - if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) - throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); - - Address = address; - Netmask = address.AddressFamily == AddressFamily.InterNetwork ? 32 : 128; } public NpgsqlInet(string addr) { - if (addr.IndexOf('/') > 0) + switch (addr.Split('/')) { - var addrbits = addr.Split('/'); - if (addrbits.GetUpperBound(0) != 1) { - throw new FormatException("Invalid number of parts in CIDR specification"); - } - Address = IPAddress.Parse(addrbits[0]); - Netmask = int.Parse(addrbits[1]); - } - else - { - Address = IPAddress.Parse(addr); - Netmask = 32; + case { Length: 2 } segments: + (Address, Netmask) = (IPAddress.Parse(segments[0]), byte.Parse(segments[1])); + break; + case { Length: 1 } segments: + var ipAddr = IPAddress.Parse(segments[0]); + CheckAddressFamily(ipAddr); + (Address, Netmask) = ( + ipAddr, + ipAddr.AddressFamily == AddressFamily.InterNetworkV6 ? 
(byte)128 : (byte)32); + break; + default: + throw new FormatException("Invalid number of parts in CIDR specification"); } } public override string ToString() + => (Address?.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || + (Address?.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128) + ? Address.ToString() + : $"{Address}/{Netmask}"; + + public static explicit operator IPAddress(NpgsqlInet inet) + => inet.Address; + + public static implicit operator NpgsqlInet(IPAddress ip) + => new(ip); + + public static implicit operator NpgsqlInet(IPNetwork cidr) + => new( + cidr.BaseAddress, + cidr.PrefixLength <= byte.MaxValue + ? (byte)cidr.PrefixLength + : throw new ArgumentOutOfRangeException(nameof(cidr), "IPNetwork.PrefixLength is too large to fit in a byte")); + + public void Deconstruct(out IPAddress address, out byte netmask) { - if ((Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || - (Address.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128)) - { - return Address.ToString(); - } - return $"{Address}/{Netmask}"; + address = Address; + netmask = Netmask; } - // ReSharper disable once InconsistentNaming - public static IPAddress ToIPAddress(NpgsqlInet inet) + static void CheckAddressFamily(IPAddress address) { - if (inet.Netmask != 32) - throw new InvalidCastException("Cannot cast CIDR network to address"); - return inet.Address; + if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) + throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); } +} - public static explicit operator IPAddress(NpgsqlInet inet) => ToIPAddress(inet); - - public static NpgsqlInet ToNpgsqlInet(IPAddress? ip) - => ip is null ? default : new NpgsqlInet(ip); - //=> ReferenceEquals(ip, null) ? 
default : new NpgsqlInet(ip); - - public static implicit operator NpgsqlInet(IPAddress ip) => ToNpgsqlInet(ip); +/// +/// Represents a PostgreSQL cidr type. +/// +/// +/// https://www.postgresql.org/docs/current/static/datatype-net-types.html +/// +[Obsolete("Use .NET IPNetwork instead of NpgsqlCidr to map to PostgreSQL cidr")] +public readonly record struct NpgsqlCidr +{ + public IPAddress Address { get; } + public byte Netmask { get; } - public void Deconstruct(out IPAddress address, out int netmask) + public NpgsqlCidr(IPAddress address, byte netmask) { - address = Address; - netmask = Netmask; + if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) + throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); + + Address = address; + Netmask = netmask; } - public bool Equals(NpgsqlInet other) => Address.Equals(other.Address) && Netmask == other.Netmask; + public NpgsqlCidr(string addr) + => (Address, Netmask) = addr.Split('/') switch + { + { Length: 2 } segments => (IPAddress.Parse(segments[0]), byte.Parse(segments[1])), + { Length: 1 } => throw new FormatException("Missing netmask"), + _ => throw new FormatException("Invalid number of parts in CIDR specification") + }; - public override bool Equals(object? 
obj) - => obj is NpgsqlInet inet && Equals(inet); + public static implicit operator NpgsqlInet(NpgsqlCidr cidr) + => new(cidr.Address, cidr.Netmask); - public override int GetHashCode() - => PGUtil.RotateShift(Address.GetHashCode(), Netmask%32); + public static explicit operator IPAddress(NpgsqlCidr cidr) + => cidr.Address; + + public override string ToString() + => $"{Address}/{Netmask}"; - public static bool operator ==(NpgsqlInet x, NpgsqlInet y) => x.Equals(y); - public static bool operator !=(NpgsqlInet x, NpgsqlInet y) => !(x == y); + public void Deconstruct(out IPAddress address, out byte netmask) + { + address = Address; + netmask = Netmask; + } } /// @@ -617,23 +583,17 @@ public override int GetHashCode() /// /// https://www.postgresql.org/docs/current/static/datatype-oid.html /// -public readonly struct NpgsqlTid : IEquatable +public readonly struct NpgsqlTid(uint blockNumber, ushort offsetNumber) : IEquatable { /// /// Block number /// - public uint BlockNumber { get; } + public uint BlockNumber { get; } = blockNumber; /// /// Tuple index within block /// - public ushort OffsetNumber { get; } - - public NpgsqlTid(uint blockNumber, ushort offsetNumber) - { - BlockNumber = blockNumber; - OffsetNumber = offsetNumber; - } + public ushort OffsetNumber { get; } = offsetNumber; public bool Equals(NpgsqlTid other) => BlockNumber == other.BlockNumber && OffsetNumber == other.OffsetNumber; @@ -645,6 +605,12 @@ public override bool Equals(object? 
o) public static bool operator ==(NpgsqlTid left, NpgsqlTid right) => left.Equals(right); public static bool operator !=(NpgsqlTid left, NpgsqlTid right) => !(left == right); public override string ToString() => $"({BlockNumber},{OffsetNumber})"; + + public void Deconstruct(out uint blockNumber, out ushort offsetNumber) + { + blockNumber = BlockNumber; + offsetNumber = OffsetNumber; + } } #pragma warning restore 1591 diff --git a/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs b/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs index 8e284111aa..ffb5f3af48 100644 --- a/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs +++ b/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs @@ -1,4 +1,4 @@ -using System; +using System; // ReSharper disable once CheckNamespace namespace NpgsqlTypes; @@ -18,14 +18,12 @@ public class PgNameAttribute : Attribute /// /// The name of PostgreSQL field that corresponds to this CLR property or field /// - public string PgName { get; private set; } + public string PgName { get; } /// /// Indicates that this property or field corresponds to a PostgreSQL field with the specified name /// /// The name of PostgreSQL field that corresponds to this CLR property or field public PgNameAttribute(string pgName) - { - PgName = pgName; - } + => PgName = pgName; } diff --git a/src/Npgsql/PgPassFile.cs b/src/Npgsql/PgPassFile.cs index 36adf68325..3e1c1605be 100644 --- a/src/Npgsql/PgPassFile.cs +++ b/src/Npgsql/PgPassFile.cs @@ -1,8 +1,7 @@ -using System; +using System; using System.Collections.Generic; using System.IO; -using System.Linq; -using System.Text.RegularExpressions; +using System.Text; namespace Npgsql; @@ -35,10 +34,21 @@ public PgPassFile(string fileName) /// Parses file content and gets all credentials from the file /// /// corresponding to all lines in the .pgpass file - internal IEnumerable Entries => File.ReadLines(FileName) - .Select(line => line.Trim()) - .Where(line => line.Any() && line[0] != '#') - .Select(Entry.Parse); + internal IEnumerable Entries + { + get + 
{ + var bytes = File.ReadAllBytes(FileName); + var mem = new MemoryStream(bytes); + using var reader = new StreamReader(mem); + while (reader.ReadLine() is { } l) + { + var line = l.Trim(); + if (line.Length > 0 && line[0] != '#') + yield return Entry.Parse(line); + } + } + } /// /// Searches queries loaded from .PGPASS file to find first entry matching the provided parameters. @@ -49,15 +59,18 @@ public PgPassFile(string fileName) /// User name to query. Use null to match any. /// Matching if match was found. Otherwise, returns null. internal Entry? GetFirstMatchingEntry(string? host = null, int? port = null, string? database = null, string? username = null) - => Entries.FirstOrDefault(entry => entry.IsMatch(host, port, database, username)); + { + foreach (var entry in Entries) + if (entry.IsMatch(host, port, database, username)) + return entry; + return null; + } /// /// Represents a hostname, port, database, username, and password combination that has been retrieved from a .pgpass file /// internal sealed class Entry { - const string PgPassWildcard = "*"; - #region Fields and Properties /// @@ -110,24 +123,53 @@ internal sealed class Entry /// Entry is not formatted as hostname:port:database:username:password or non-wildcard port is not a number internal static Entry Parse(string serializedEntry) { - var parts = Regex.Split(serializedEntry, @"(?(5); - var processedParts = parts - .Select(part => part.Replace("\\:", ":").Replace("\\\\", "\\")) // unescape any escaped characters - .Select(part => part == PgPassWildcard ? 
null : part) - .ToArray(); + var builder = new StringBuilder(); + for (var pos = 0; pos < serializedEntry.Length; pos++) + { + var c = serializedEntry[pos]; + + switch (c) + { + case '\\' when pos < serializedEntry.Length - 1: + // Strip backslash before colon or backslash, otherwise preserve it + c = serializedEntry[++pos]; + if (c is not (':' or '\\')) + { + builder.Append('\\'); + } + + builder.Append(c); + continue; + + case ':': + var part = builder.ToString(); + parts.Add(part == "*" ? null : part); + builder.Clear(); + continue; + + default: + builder.Append(c); + continue; + } + } + + var lastPart = builder.ToString(); + parts.Add(lastPart == "*" ? null : lastPart); + + if (parts.Count != 5) + throw new FormatException("pgpass entry was not well-formed. Please ensure all non-comment entries are formatted as hostname:port:database:username:password. If colon is included, it must be escaped like \\:."); int? port = null; - if (processedParts[1] != null) + if (parts[1] != null) { - if (!int.TryParse(processedParts[1], out var tempPort)) + if (!int.TryParse(parts[1], out var tempPort)) throw new FormatException("pgpass entry was not formatted correctly. Port must be a valid integer."); port = tempPort; } - return new Entry(processedParts[0], port, processedParts[2], processedParts[3], processedParts[4]); + return new Entry(parts[0], port, parts[2], parts[3], parts[4]); } #endregion @@ -156,4 +198,4 @@ bool AreValuesMatched(string? query, string? actual) bool AreValuesMatched(int? query, int? 
actual) => query == actual || actual == null || query == null; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PoolManager.cs b/src/Npgsql/PoolManager.cs index adc3c75fa6..e93d6a856d 100644 --- a/src/Npgsql/PoolManager.cs +++ b/src/Npgsql/PoolManager.cs @@ -1,7 +1,5 @@ -using System; +using System; using System.Collections.Concurrent; -using System.Diagnostics.CodeAnalysis; -using System.Threading; namespace Npgsql; diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index f6a87c9e9b..813fff6b22 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -1,11 +1,10 @@ -using System; +using System; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Channels; using System.Threading.Tasks; -using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Util; @@ -16,8 +15,9 @@ class PoolingDataSource : NpgsqlDataSource { #region Fields and properties - readonly int _max; - readonly int _min; + internal int MaxConnections { get; } + internal int MinConnections { get; } + readonly TimeSpan _connectionLifetime; volatile int _numConnectors; @@ -30,8 +30,6 @@ class PoolingDataSource : NpgsqlDataSource /// private protected readonly NpgsqlConnector?[] Connectors; - readonly NpgsqlMultiHostDataSource? _parentPool; - /// /// Reader side for the idle connector channel. Contains nulls in order to release waiting attempts after /// a connector has been physically closed/broken. 
@@ -59,8 +57,6 @@ class PoolingDataSource : NpgsqlDataSource volatile int _isClearing; - static readonly SingleThreadSynchronizationContext SingleThreadSynchronizationContext = new("NpgsqlRemainingAsyncSendWorker"); - #endregion internal sealed override (int Total, int Idle, int Busy) Statistics @@ -77,24 +73,21 @@ internal sealed override (int Total, int Idle, int Busy) Statistics internal PoolingDataSource( NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig, - NpgsqlMultiHostDataSource? parentPool = null) - : base(settings, dataSourceConfig) + NpgsqlDataSourceConfiguration dataSourceConfig) + : base(settings, dataSourceConfig, reportMetrics: true) { if (settings.MaxPoolSize < settings.MinPoolSize) throw new ArgumentException($"Connection can't have 'Max Pool Size' {settings.MaxPoolSize} under 'Min Pool Size' {settings.MinPoolSize}"); - _parentPool = parentPool; - // We enforce Max Pool Size, so no need to to create a bounded channel (which is less efficient) - // On the consuming side, we have the multiplexing write loop but also non-multiplexing Rents - // On the producing side, we have connections being released back into the pool (both multiplexing and not) + // On the consuming side, we have Rents + // On the producing side, we have connections being released back into the pool var idleChannel = Channel.CreateUnbounded(); _idleConnectorReader = idleChannel.Reader; IdleConnectorWriter = idleChannel.Writer; - _max = settings.MaxPoolSize; - _min = settings.MinPoolSize; + MaxConnections = settings.MaxPoolSize; + MinConnections = settings.MinPoolSize; if (settings.ConnectionPruningInterval == 0) throw new ArgumentException("ConnectionPruningInterval can't be 0."); @@ -103,7 +96,8 @@ internal PoolingDataSource( if (connectionIdleLifetime < pruningSamplingInterval) throw new ArgumentException($"Connection can't have {nameof(settings.ConnectionIdleLifetime)} {connectionIdleLifetime} under 
{nameof(settings.ConnectionPruningInterval)} {pruningSamplingInterval}"); - _pruningTimer = new Timer(PruningTimerCallback, this, Timeout.Infinite, Timeout.Infinite); + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + _pruningTimer = new Timer(PruningTimerCallback, this, Timeout.Infinite, Timeout.Infinite); _pruningSampleSize = DivideRoundingUp(settings.ConnectionIdleLifetime, settings.ConnectionPruningInterval); _pruningMedianIndex = DivideRoundingUp(_pruningSampleSize, 2) - 1; // - 1 to go from length to index _pruningSamplingInterval = pruningSamplingInterval; @@ -111,11 +105,13 @@ internal PoolingDataSource( _pruningTimerEnabled = false; _connectionLifetime = TimeSpan.FromSeconds(settings.ConnectionLifetime); - Connectors = new NpgsqlConnector[_max]; + Connectors = new NpgsqlConnector[MaxConnections]; _logger = LoggingConfiguration.ConnectionLogger; } + static SemaphoreSlim SyncOverAsyncSemaphore { get; } = new(Math.Max(1, Environment.ProcessorCount / 2)); + internal sealed override ValueTask Get( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { @@ -129,7 +125,7 @@ async ValueTask RentAsync( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { // First, try to open a new physical connector. This will fail if we're at max capacity. 
- var connector = await OpenNewConnector(conn, timeout, async, cancellationToken); + var connector = await OpenNewConnector(conn, timeout, async, cancellationToken).ConfigureAwait(false); if (connector != null) return connector; @@ -139,56 +135,69 @@ async ValueTask RentAsync( using var linkedSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); var finalToken = linkedSource.Token; linkedSource.CancelAfter(timeout.CheckAndGetTimeLeft()); + MetricsReporter.ReportPendingConnectionRequestStart(); - while (true) + try { - try + while (true) { - if (async) + try { - connector = await _idleConnectorReader.ReadAsync(finalToken); + if (async) + connector = await _idleConnectorReader.ReadAsync(finalToken).ConfigureAwait(false); + else + { + SyncOverAsyncSemaphore.Wait(finalToken); + try + { + var awaiter = _idleConnectorReader.ReadAsync(finalToken).ConfigureAwait(false).GetAwaiter(); + var mres = new ManualResetEventSlim(false, 0); + + // Cancellation happens through the ReadAsync call, which will complete the task. + awaiter.UnsafeOnCompleted(() => mres.Set()); + mres.Wait(CancellationToken.None); + connector = awaiter.GetResult(); + } + finally + { + SyncOverAsyncSemaphore.Release(); + } + } + if (CheckIdleConnector(connector)) return connector; } - else + catch (OperationCanceledException) { - // Channels don't have a sync API. To avoid sync-over-async issues, we use a special single- - // thread synchronization context which ensures that callbacks are executed on a dedicated - // thread. - // Note that AsTask isn't safe here for getting the result, since it still causes some continuation code - // to get executed on the TP (which can cause deadlocks). 
- using (SingleThreadSynchronizationContext.Enter()) - using (var mre = new ManualResetEventSlim()) - { - _idleConnectorReader.WaitToReadAsync(finalToken).GetAwaiter().OnCompleted(() => mre.Set()); - mre.Wait(finalToken); - } + cancellationToken.ThrowIfCancellationRequested(); + Debug.Assert(finalToken.IsCancellationRequested); + + MetricsReporter.ReportConnectionPoolTimeout(); + throw new NpgsqlException( + $"The connection pool has been exhausted, either raise 'Max Pool Size' (currently {MaxConnections}) " + + $"or 'Timeout' (currently {Settings.Timeout} seconds) in your connection string.", + new TimeoutException()); + } + catch (ChannelClosedException) + { + throw new NpgsqlException("The connection pool has been shut down."); } - } - catch (OperationCanceledException) - { - cancellationToken.ThrowIfCancellationRequested(); - Debug.Assert(finalToken.IsCancellationRequested); - throw new NpgsqlException( - $"The connection pool has been exhausted, either raise 'Max Pool Size' (currently {_max}) " + - $"or 'Timeout' (currently {Settings.Timeout} seconds) in your connection string.", - new TimeoutException()); - } - catch (ChannelClosedException) - { - throw new NpgsqlException("The connection pool has been shut down."); - } - // If we're here, our waiting attempt on the idle connector channel was released with a null - // (or bad connector), or we're in sync mode. Check again if a new idle connector has appeared since we last checked. - if (TryGetIdleConnector(out connector)) - return connector; + // If we're here, our waiting attempt on the idle connector channel was released with a null + // (or bad connector), or we're in sync mode. Check again if a new idle connector has appeared since we last checked. + if (TryGetIdleConnector(out connector)) + return connector; - // We might have closed a connector in the meantime and no longer be at max capacity - // so try to open a new connector and if that fails, loop again. 
- connector = await OpenNewConnector(conn, timeout, async, cancellationToken); - if (connector != null) - return connector; + // We might have closed a connector in the meantime and no longer be at max capacity + // so try to open a new connector and if that fails, loop again. + connector = await OpenNewConnector(conn, timeout, async, cancellationToken).ConfigureAwait(false); + if (connector != null) + return connector; + } + } + finally + { + MetricsReporter.ReportPendingConnectionRequestStop(); } } } @@ -229,19 +238,12 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) return false; } - // The connector directly references the data source type mapper into the connector, to protect it against changes by a concurrent + // The connector directly references the current reloadable state reference, to protect it against changes by a concurrent // ReloadTypes. We update them here before returning the connector from the pool. - Debug.Assert(TypeMapper is not null); - Debug.Assert(DatabaseInfo is not null); - connector.TypeMapper = TypeMapper; - connector.DatabaseInfo = DatabaseInfo; + connector.ReloadableState = CurrentReloadableState; Debug.Assert(connector.State == ConnectorState.Ready, $"Got idle connector but {nameof(connector.State)} is {connector.State}"); - Debug.Assert(connector.CommandsInFlightCount == 0, - $"Got idle connector but {nameof(connector.CommandsInFlightCount)} is {connector.CommandsInFlightCount}"); - Debug.Assert(connector.MultiplexAsyncWritingLock == 0, - $"Got idle connector but {nameof(connector.MultiplexAsyncWritingLock)} is 1"); return true; } @@ -250,7 +252,7 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { // As long as we're under max capacity, attempt to increase the connector count and open a new connection. 
- for (var numConnectors = _numConnectors; numConnectors < _max; numConnectors = _numConnectors) + for (var numConnectors = _numConnectors; numConnectors < MaxConnections; numConnectors = _numConnectors) { // Note that we purposefully don't use SpinWait for this: https://github.com/dotnet/coreclr/pull/21437 if (Interlocked.CompareExchange(ref _numConnectors, numConnectors + 1, numConnectors) != numConnectors) @@ -259,22 +261,24 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) try { // We've managed to increase the open counter, open a physical connections. + var startTime = Stopwatch.GetTimestamp(); var connector = new NpgsqlConnector(this, conn) { ClearCounter = _clearCounter }; - await connector.Open(timeout, async, cancellationToken); + await connector.Open(timeout, async, cancellationToken).ConfigureAwait(false); + MetricsReporter.ReportConnectionCreateTime(Stopwatch.GetElapsedTime(startTime)); var i = 0; - for (; i < _max; i++) + for (; i < MaxConnections; i++) if (Interlocked.CompareExchange(ref Connectors[i], connector, null) == null) break; - Debug.Assert(i < _max, $"Could not find free slot in {Connectors} when opening."); - if (i == _max) + Debug.Assert(i < MaxConnections, $"Could not find free slot in {Connectors} when opening."); + if (i == MaxConnections) throw new NpgsqlException($"Could not find free slot in {Connectors} when opening. Please report a bug."); // Only start pruning if we've incremented open count past _min. // Note that we don't do it only once, on equality, because the thread which incremented open count past _min might get exception // on NpgsqlConnector.Open due to timeout, CancellationToken or other reasons. - if (numConnectors >= _min) + if (numConnectors >= MinConnections) UpdatePruningTimer(); return connector; @@ -302,8 +306,6 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? 
connector) internal sealed override void Return(NpgsqlConnector connector) { Debug.Assert(!connector.InTransaction); - Debug.Assert(connector.MultiplexAsyncWritingLock == 0 || connector.IsBroken || connector.IsClosed, - $"About to return multiplexing connector to the pool, but {nameof(connector.MultiplexAsyncWritingLock)} is {connector.MultiplexAsyncWritingLock}"); // If Clear/ClearAll has been been called since this connector was first opened, // throw it away. The same if it's broken (in which case CloseConnector is only @@ -320,7 +322,7 @@ internal sealed override void Return(NpgsqlConnector connector) Debug.Assert(written); } - internal override void Clear() + public override void Clear() { Interlocked.Increment(ref _clearCounter); @@ -357,14 +359,14 @@ void CloseConnector(NpgsqlConnector connector) } var i = 0; - for (; i < _max; i++) + for (; i < MaxConnections; i++) if (Interlocked.CompareExchange(ref Connectors[i], null, connector) == connector) break; // If CloseConnector is being called from within OpenNewConnector (e.g. an error happened during a connection initializer which // causes the connector to Break, and therefore return the connector), then we haven't yet added the connector to Connectors. // In this case, there's no state to revert here (that's all taken care of in OpenNewConnector), skip it. - if (i == _max) + if (i == MaxConnections) return; var numConnectors = Interlocked.Decrement(ref _numConnectors); @@ -376,15 +378,10 @@ void CloseConnector(NpgsqlConnector connector) IdleConnectorWriter.TryWrite(null); // Only turn off the timer one time, when it was this Close that brought Open back to _min. - if (numConnectors == _min) + if (numConnectors == MinConnections) UpdatePruningTimer(); } - internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => _parentPool is null - ? 
base.TryRemovePendingEnlistedConnector(connector, transaction) - : _parentPool.TryRemovePendingEnlistedConnector(connector, transaction); - #region Pruning void UpdatePruningTimer() @@ -392,12 +389,12 @@ void UpdatePruningTimer() lock (_pruningTimer) { var numConnectors = _numConnectors; - if (numConnectors > _min && !_pruningTimerEnabled) + if (numConnectors > MinConnections && !_pruningTimerEnabled) { _pruningTimerEnabled = true; _pruningTimer.Change(_pruningSamplingInterval, Timeout.InfiniteTimeSpan); } - else if (numConnectors <= _min && _pruningTimerEnabled) + else if (numConnectors <= MinConnections && _pruningTimerEnabled) { _pruningTimer.Change(Timeout.Infinite, Timeout.Infinite); _pruningSampleIndex = 0; @@ -434,15 +431,14 @@ static void PruneIdleConnectors(object? state) } while (toPrune > 0 && - pool._numConnectors > pool._min && + pool._numConnectors > pool.MinConnections && pool._idleConnectorReader.TryRead(out var connector) && connector != null) { if (pool.CheckIdleConnector(connector)) - { pool.CloseConnector(connector); - toPrune--; - } + + toPrune--; } } diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index a8a82fccd4..7ffe93037e 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -1,16 +1,15 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; -using System.Linq; -using System.Runtime.CompilerServices; using System.Text; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; using Npgsql.BackendMessages; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; using Npgsql.Util; using static Npgsql.Util.Statics; @@ -28,7 +27,7 @@ sealed class PostgresDatabaseInfoFactory : INpgsqlDatabaseInfoFactory public async Task Load(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) { var db = new 
PostgresDatabaseInfo(conn); - await db.LoadPostgresInfo(conn, timeout, async); + await db.LoadPostgresInfo(conn, timeout, async).ConfigureAwait(false); Debug.Assert(db.LongVersion != null); return db; } @@ -46,19 +45,23 @@ class PostgresDatabaseInfo : NpgsqlDatabaseInfo /// List? _types; + bool? _isRedshift; + /// - protected override IEnumerable GetTypes() => _types ?? Enumerable.Empty(); + protected override IEnumerable GetTypes() => _types ?? (IEnumerable)Array.Empty(); /// /// The PostgreSQL version string as returned by the version() function. Populated during loading. /// - public string LongVersion { get; set; } = default!; + public string LongVersion { get; set; } = ""; /// /// True if the backend is Amazon Redshift; otherwise, false. /// - public bool IsRedshift { get; private set; } + public bool IsRedshift => _isRedshift ??= LongVersion.Contains("redshift", StringComparison.OrdinalIgnoreCase); + // Note that UNLISTEN is only needed for the reset message, but those don't get generated for Redshift anyway because e.g. DISCARD + // isn't supported there either. So the IsRedshift check isn't actually used, but is here for completeness. /// public override bool SupportsUnlisten => Version.IsGreaterOrEqual(6, 4) && !IsRedshift; @@ -80,6 +83,10 @@ internal PostgresDatabaseInfo(NpgsqlConnector conn) : base(conn.Host!, conn.Port, conn.Database!, conn.PostgresParameters["server_version"]) => _connectionLogger = conn.LoggingConfiguration.ConnectionLogger; + private protected PostgresDatabaseInfo(string host, int port, string databaseName, string serverVersion) + : base(host, port, databaseName, serverVersion) + => _connectionLogger = NullLogger.Instance; + /// /// Loads database information from the PostgreSQL database specified by . 
/// @@ -95,10 +102,11 @@ internal async Task LoadPostgresInfo(NpgsqlConnector conn, NpgsqlTimeout timeout conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) && intDateTimes == "on"; - IsRedshift = conn.Settings.ServerCompatibilityMode == ServerCompatibilityMode.Redshift; - _types = await LoadBackendTypes(conn, timeout, async); + _types = await LoadBackendTypes(conn, timeout, async).ConfigureAwait(false); } + const string BuiltinSchemaListSqlFragment = "'pg_catalog', 'information_schema', 'pg_toast'"; + /// /// Generates a raw SQL query string to select type information. /// @@ -109,7 +117,7 @@ internal async Task LoadPostgresInfo(NpgsqlConnector conn, NpgsqlTimeout timeout /// For arrays and ranges, join in the element OID and type (to filter out arrays of unhandled /// types). /// - static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool loadTableComposites) + static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool loadTableComposites, string? schemaListSqlFragment, bool hasTypeCategory) => $@" SELECT ns.nspname, t.oid, t.typname, t.typtype, t.typnotnull, t.elemtypoid FROM ( @@ -120,6 +128,7 @@ static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool l typ.oid, typ.typnamespace, typ.typname, typ.typtype, typ.typrelid, typ.typnotnull, typ.relkind, elemtyp.oid AS elemtypoid, elemtyp.typname AS elemtypname, elemcls.relkind AS elemrelkind, CASE WHEN elemproc.proname='array_recv' THEN 'a' ELSE elemtyp.typtype END AS elemtyptype + {(hasTypeCategory ? ", typ.typcategory" : "")} FROM ( SELECT typ.oid, typnamespace, typname, typrelid, typnotnull, relkind, typelem AS elemoid, CASE WHEN proc.proname='array_recv' THEN 'a' ELSE typ.typtype END AS typtype, @@ -129,6 +138,7 @@ static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool l {(withMultirange ? 
"WHEN typ.typtype='m' THEN (SELECT rngtypid FROM pg_range WHERE rngmultitypid = typ.oid)" : "")} WHEN typ.typtype='d' THEN typ.typbasetype END AS elemtypoid + {(hasTypeCategory ? ", typ.typcategory" : "")} FROM pg_type AS typ LEFT JOIN pg_class AS cls ON (cls.oid = typ.typrelid) LEFT JOIN pg_proc AS proc ON proc.oid = typ.typreceive @@ -140,25 +150,26 @@ LEFT JOIN pg_class AS elemcls ON (elemcls.oid = elemtyp.typrelid) ) AS t JOIN pg_namespace AS ns ON (ns.oid = typnamespace) WHERE - typtype IN ('b', 'r', 'm', 'e', 'd') OR -- Base, range, multirange, enum, domain - (typtype = 'c' AND {(loadTableComposites ? "ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "relkind='c'")}) OR -- User-defined free-standing composites (not table composites) by default - (typtype = 'p' AND typname IN ('record', 'void')) OR -- Some special supported pseudo-types + {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({schemaListSqlFragment}){(hasTypeCategory ? " OR typcategory = 'U'" : "" )}) AND " : "")} + (typtype IN ('b', 'r', 'm', 'e', 'd') OR -- Base, range, multirange, enum, domain + (typtype = 'c' AND {(loadTableComposites ? $"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "relkind='c'")}) OR -- User-defined free-standing composites (not table composites) by default + (typtype = 'p' AND typname IN ('record', 'void', 'unknown')) OR -- Some special supported pseudo-types (typtype = 'a' AND ( -- Array of... elemtyptype IN ('b', 'r', 'm', 'e', 'd') OR -- Array of base, range, multirange, enum, domain (elemtyptype = 'p' AND elemtypname IN ('record', 'void')) OR -- Arrays of special supported pseudo-types - (elemtyptype = 'c' AND {(loadTableComposites ? "ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "elemrelkind='c'")}) -- Array of user-defined free-standing composites (not table composites) by default - )) + (elemtyptype = 'c' AND {(loadTableComposites ? 
$"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "elemrelkind='c'")}) -- Array of user-defined free-standing composites (not table composites) by default + ))) ORDER BY CASE WHEN typtype IN ('b', 'e', 'p') THEN 0 -- First base types, enums, pseudo-types - WHEN typtype = 'r' THEN 1 -- Ranges after - WHEN typtype = 'm' THEN 2 -- Multiranges after - WHEN typtype = 'c' THEN 3 -- Composites after + WHEN typtype = 'c' THEN 1 -- Composites after (fields loaded later in 2nd pass) + WHEN typtype = 'r' THEN 2 -- Ranges after + WHEN typtype = 'm' THEN 3 -- Multiranges after WHEN typtype = 'd' AND elemtyptype <> 'a' THEN 4 -- Domains over non-arrays after WHEN typtype = 'a' THEN 5 -- Arrays after WHEN typtype = 'd' AND elemtyptype = 'a' THEN 6 -- Domains over arrays last END;"; - static string GenerateLoadCompositeTypesQuery(bool loadTableComposites) + static string GenerateLoadCompositeTypesQuery(bool loadTableComposites, string? schemaListSqlFragment) => $@" -- Load field definitions for (free-standing) composite types SELECT typ.oid, att.attname, att.atttypid @@ -167,17 +178,20 @@ JOIN pg_namespace AS ns ON (ns.oid = typ.typnamespace) JOIN pg_class AS cls ON (cls.oid = typ.typrelid) JOIN pg_attribute AS att ON (att.attrelid = typ.typrelid) WHERE - (typ.typtype = 'c' AND {(loadTableComposites ? "ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "cls.relkind='c'")}) AND + (typ.typtype = 'c' AND {(loadTableComposites ? $"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "cls.relkind='c'")}) AND + {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({schemaListSqlFragment})) AND " : "")} attnum > 0 AND -- Don't load system attributes NOT attisdropped ORDER BY typ.oid, att.attnum;"; - static string GenerateLoadEnumFieldsQuery(bool withEnumSortOrder) + static string GenerateLoadEnumFieldsQuery(bool withEnumSortOrder, string? 
schemaListSqlFragment) => $@" -- Load enum fields -SELECT pg_type.oid, enumlabel +SELECT typ.oid, enumlabel FROM pg_enum -JOIN pg_type ON pg_type.oid=enumtypid +JOIN pg_type AS typ ON typ.oid = enumtypid +JOIN pg_namespace AS ns ON ns.oid = typ.typnamespace +{(schemaListSqlFragment is not null ? $"WHERE (ns.nspname IN ({schemaListSqlFragment}))" : "")} ORDER BY oid{(withEnumSortOrder ? ", enumsortorder" : "")};"; /// @@ -194,10 +208,31 @@ FROM pg_enum internal async Task> LoadBackendTypes(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) { var versionQuery = "SELECT version();"; - var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, conn.Settings.LoadTableComposites); - var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(conn.Settings.LoadTableComposites); + var typeLoading = conn.DataSource.Configuration.TypeLoading; + var loadTableComposites = typeLoading.LoadTableComposites; + + // Escape the schemas configured by the user, we need these as literals to be used in an IN() operator, and we cannot use parameters. + // Add an opening quote, escape any quotes in the schema, and add a closing quote. + string? schemaListSqlFragment = null; + if (typeLoading.TypeLoadingSchemas is not null) + { + var builder = new StringBuilder(BuiltinSchemaListSqlFragment); + for (var i = 0; i < typeLoading.TypeLoadingSchemas.Length; i++) + { + builder.Append(", "); + var schema = typeLoading.TypeLoadingSchemas[i]; + builder.Append('\''); + builder.Append(EscapeLiteral(schema)); + builder.Append('\''); + } + + schemaListSqlFragment = builder.ToString(); + } + + var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, loadTableComposites, schemaListSqlFragment, HasTypeCategory); + var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(loadTableComposites, schemaListSqlFragment); var loadEnumFieldsQuery = SupportsEnumTypes - ? GenerateLoadEnumFieldsQuery(HasEnumSortOrder) + ? 
GenerateLoadEnumFieldsQuery(HasEnumSortOrder, schemaListSqlFragment) : string.Empty; timeout.CheckAndApply(conn); @@ -215,11 +250,11 @@ internal async Task> LoadBackendTypes(NpgsqlConnector conn, N var isReplicationConnection = conn.Settings.ReplicationMode != ReplicationMode.Off; if (isReplicationConnection) { - await conn.WriteQuery(versionQuery, async); - await conn.WriteQuery(SanitizeForReplicationConnection(loadTypesQuery), async); - await conn.WriteQuery(SanitizeForReplicationConnection(loadCompositeTypesQuery), async); + await conn.WriteQuery(versionQuery, async).ConfigureAwait(false); + await conn.WriteQuery(SanitizeForReplicationConnection(loadTypesQuery), async).ConfigureAwait(false); + await conn.WriteQuery(SanitizeForReplicationConnection(loadCompositeTypesQuery), async).ConfigureAwait(false); if (SupportsEnumTypes) - await conn.WriteQuery(SanitizeForReplicationConnection(loadEnumFieldsQuery), async); + await conn.WriteQuery(SanitizeForReplicationConnection(loadEnumFieldsQuery), async).ConfigureAwait(false); static string SanitizeForReplicationConnection(string str) { @@ -295,136 +330,77 @@ static string SanitizeForReplicationConnection(string str) if (SupportsEnumTypes) batchQuery.AppendLine(loadEnumFieldsQuery); - await conn.WriteQuery(batchQuery.ToString(), async); + await conn.WriteQuery(batchQuery.ToString(), async).ConfigureAwait(false); } - await conn.Flush(async); + await conn.Flush(async).ConfigureAwait(false); var byOID = new Dictionary(); - var buf = conn.ReadBuffer; // First read the PostgreSQL version - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // We read the message in non-sequential mode which buffers the whole message. 
// There is no need to ensure data within the message boundaries - Expect(await conn.ReadMessage(async), conn); - buf.Skip(2); // Column count - LongVersion = ReadNonNullableString(buf); - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); + // Note that here and below we don't assign ReadBuffer to a variable + // because we might allocate oversize buffer + conn.ReadBuffer.Skip(2); // Column count + LongVersion = ReadNonNullableString(conn.ReadBuffer); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // Then load the types - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); IBackendMessage msg; + var unknownPostgresTypes = new List(); while (true) { - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg is not DataRowMessage) break; - buf.Skip(2); // Column count - var nspname = ReadNonNullableString(buf); - var oid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); + conn.ReadBuffer.Skip(2); // Column count + var nspname = ReadNonNullableString(conn.ReadBuffer); + var oid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); Debug.Assert(oid != 0); - var typname = ReadNonNullableString(buf); - var typtype = ReadNonNullableString(buf)[0]; - var typnotnull = ReadNonNullableString(buf)[0] == 't'; - var len = buf.ReadInt32(); - var elemtypoid = len == -1 ? 
0 : uint.Parse(buf.ReadString(len), NumberFormatInfo.InvariantInfo); - - switch (typtype) - { - case 'b': // Normal base type - var baseType = new PostgresBaseType(nspname, typname, oid); - byOID[baseType.OID] = baseType; - continue; + var typname = ReadNonNullableString(conn.ReadBuffer); + var typtype = ReadNonNullableString(conn.ReadBuffer)[0]; + var typnotnull = ReadNonNullableString(conn.ReadBuffer)[0] == 't'; + var len = conn.ReadBuffer.ReadInt32(); + var elemtypoid = len == -1 ? 0 : uint.Parse(conn.ReadBuffer.ReadString(len), NumberFormatInfo.InvariantInfo); + + var postgresTypeDefinition = new PostgresTypeDefinition(nspname, oid, typname, typtype, typnotnull, elemtypoid); + if (!TryAddPostgresType(postgresTypeDefinition, byOID)) + unknownPostgresTypes.Add(postgresTypeDefinition); + } - case 'a': // Array + while (unknownPostgresTypes.Count > 0) + { + var hasChanges = false; + for (var i = unknownPostgresTypes.Count - 1; i >= 0; i--) { - Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var elementPostgresType)) + var unknownPostgresType = unknownPostgresTypes[i]; + if (TryAddPostgresType(unknownPostgresType, byOID)) { - _connectionLogger.LogTrace("Array type '{ArrayTypeName}' refers to unknown element with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; + unknownPostgresTypes.RemoveAt(i); + hasChanges = true; } - - var arrayType = new PostgresArrayType(nspname, typname, oid, elementPostgresType); - byOID[arrayType.OID] = arrayType; - continue; } - case 'r': // Range + if (!hasChanges) { - Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var subtypePostgresType)) - { - _connectionLogger.LogTrace("Range type '{RangeTypeName}' refers to unknown subtype with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; - } - - var rangeType = new PostgresRangeType(nspname, typname, oid, subtypePostgresType); - byOID[rangeType.OID] = rangeType; - continue; - } - - case 'm': // Multirange - 
Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var type)) - { - _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to unknown range with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; - } - - if (type is not PostgresRangeType rangePostgresType) - { - _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to non-range type '{TypeName}', skipping", - typname, type.Name); - continue; - } - - var multirangeType = new PostgresMultirangeType(nspname, typname, oid, rangePostgresType); - byOID[multirangeType.OID] = multirangeType; - continue; - - case 'e': // Enum - var enumType = new PostgresEnumType(nspname, typname, oid); - byOID[enumType.OID] = enumType; - continue; - - case 'c': // Composite - var compositeType = new PostgresCompositeType(nspname, typname, oid); - byOID[compositeType.OID] = compositeType; - continue; - - case 'd': // Domain - Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var basePostgresType)) - { - _connectionLogger.LogTrace("Domain type '{DomainTypeName}' refers to unknown base type with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; - } - - var domainType = new PostgresDomainType(nspname, typname, oid, basePostgresType, typnotnull); - byOID[domainType.OID] = domainType; - continue; - - case 'p': // pseudo-type (record, void) - goto case 'b'; // Hack this as a base type - - default: - throw new ArgumentOutOfRangeException($"Unknown typtype for type '{typname}' in pg_type: {typtype}"); + _connectionLogger.LogWarning("Unable to load '{UnknownTypeCount}' Postgres types while loading database info.", + unknownPostgresTypes.Count); + break; } } + Expect(msg, conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // Then load the composite type fields - Expect(await conn.ReadMessage(async), conn); + Expect(await 
conn.ReadMessage(async).ConfigureAwait(false), conn); var currentOID = uint.MaxValue; PostgresCompositeType? currentComposite = null; @@ -432,14 +408,14 @@ static string SanitizeForReplicationConnection(string str) while (true) { - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg is not DataRowMessage) break; - buf.Skip(2); // Column count - var oid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); - var attname = ReadNonNullableString(buf); - var atttypid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); + conn.ReadBuffer.Skip(2); // Column count + var oid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); + var attname = ReadNonNullableString(conn.ReadBuffer); + var atttypid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); if (oid != currentOID) { @@ -481,12 +457,12 @@ static string SanitizeForReplicationConnection(string str) } Expect(msg, conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); if (SupportsEnumTypes) { // Then load the enum fields - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); currentOID = uint.MaxValue; PostgresEnumType? 
currentEnum = null; @@ -494,13 +470,13 @@ static string SanitizeForReplicationConnection(string str) while (true) { - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg is not DataRowMessage) break; - buf.Skip(2); // Column count - var oid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); - var enumlabel = ReadNonNullableString(buf); + conn.ReadBuffer.Skip(2); // Column count + var oid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); + var enumlabel = ReadNonNullableString(conn.ReadBuffer); if (oid != currentOID) { currentOID = oid; @@ -532,14 +508,110 @@ static string SanitizeForReplicationConnection(string str) } Expect(msg, conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); } if (!isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); - return byOID.Values.ToList(); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); + + return [..byOID.Values]; static string ReadNonNullableString(NpgsqlReadBuffer buffer) => buffer.ReadString(buffer.ReadInt32()); + + bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionary byOID) + { + switch (postgresTypeDefinition.Type) + { + case 'b': // Normal base type + var baseType = new PostgresBaseType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID); + byOID[baseType.OID] = baseType; + return true; + + case 'a': // Array + { + Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var elementPostgresType)) + { + _connectionLogger.LogTrace("Array type '{ArrayTypeName}' refers to unknown element with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + var arrayType = new 
PostgresArrayType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, elementPostgresType); + byOID[arrayType.OID] = arrayType; + return true; + } + + case 'r': // Range + { + Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var subtypePostgresType)) + { + _connectionLogger.LogTrace("Range type '{RangeTypeName}' refers to unknown subtype with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + var rangeType = new PostgresRangeType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, subtypePostgresType); + byOID[rangeType.OID] = rangeType; + return true; + } + + case 'm': // Multirange + Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var type)) + { + _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to unknown range with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + if (type is not PostgresRangeType rangePostgresType) + { + _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to non-range type '{TypeName}', skipping", + postgresTypeDefinition.Name, type.Name); + return false; + } + + var multirangeType = new PostgresMultirangeType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, rangePostgresType); + byOID[multirangeType.OID] = multirangeType; + return true; + + case 'e': // Enum + var enumType = new PostgresEnumType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID); + byOID[enumType.OID] = enumType; + return true; + + case 'c': // Composite + var compositeType = new PostgresCompositeType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID); + byOID[compositeType.OID] = compositeType; + return true; + + case 'd': // Domain + 
Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var basePostgresType)) + { + _connectionLogger.LogTrace("Domain type '{DomainTypeName}' refers to unknown base type with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + var domainType = new PostgresDomainType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, basePostgresType, postgresTypeDefinition.NotNull); + byOID[domainType.OID] = domainType; + return true; + + case 'p': // pseudo-type (record, void) + goto case 'b'; // Hack this as a base type + + default: + throw new ArgumentOutOfRangeException($"Unknown typtype for type '{postgresTypeDefinition.Name}' in pg_type: {postgresTypeDefinition.Type}"); + } + } } -} \ No newline at end of file +} + +readonly record struct PostgresTypeDefinition(string Namespace, uint OID, string Name, char Type, bool NotNull, uint ElemTypeOID) +{ + public DataTypeName DataTypeName => DataTypeName.CreateFullyQualifiedName(Namespace + "." + Name); +} diff --git a/src/Npgsql/PostgresEnvironment.cs b/src/Npgsql/PostgresEnvironment.cs index 69036601e5..3ba874ae4c 100644 --- a/src/Npgsql/PostgresEnvironment.cs +++ b/src/Npgsql/PostgresEnvironment.cs @@ -48,6 +48,14 @@ internal static string? SslCertRootDefault internal static string? TargetSessionAttributes => Environment.GetEnvironmentVariable("PGTARGETSESSIONATTRS"); + internal static string? SslNegotiation => Environment.GetEnvironmentVariable("PGSSLNEGOTIATION"); + + internal static string? GssEncryptionMode => Environment.GetEnvironmentVariable("PGGSSENCMODE"); + + internal static string? RequireAuth => Environment.GetEnvironmentVariable("PGREQUIREAUTH"); + + internal static string? AppName => Environment.GetEnvironmentVariable("PGAPPNAME"); + static string? GetHomeDir() => Environment.GetEnvironmentVariable(RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? 
"APPDATA" : "HOME"); @@ -55,4 +63,4 @@ internal static string? SslCertRootDefault => GetHomeDir() is string homedir ? Path.Combine(homedir, RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "postgresql" : ".postgresql") : null; -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresErrorCodes.cs b/src/Npgsql/PostgresErrorCodes.cs index 4dbbf904d8..258d89ca45 100644 --- a/src/Npgsql/PostgresErrorCodes.cs +++ b/src/Npgsql/PostgresErrorCodes.cs @@ -1,7 +1,6 @@ -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member using System; -using System.Linq; namespace Npgsql; @@ -390,6 +389,7 @@ public static class PostgresErrorCodes public const string CrashShutdown = "57P02"; public const string CannotConnectNow = "57P03"; public const string DatabaseDropped = "57P04"; + public const string IdleSessionTimeout = "57P05"; #endregion Class 57 - Operator Intervention @@ -466,17 +466,23 @@ public static class PostgresErrorCodes #endregion Class XX - Internal Error static readonly string[] CriticalFailureCodes = - { + [ "53", // Insufficient resources AdminShutdown, // Self explanatory CrashShutdown, // Self explanatory CannotConnectNow, // Database is starting up "58", // System errors, external to PG (server is dying) "F0", // Configuration file error - "XX", // Internal error (database is dying) - }; + "XX" // Internal error (database is dying) + ]; internal static bool IsCriticalFailure(PostgresException e, bool clusterError = true) - => CriticalFailureCodes.Any(x => e.SqlState.StartsWith(x, StringComparison.Ordinal)) || - !clusterError && e.SqlState == ProtocolViolation; // We only treat ProtocolViolation as critical for connection -} \ No newline at end of file + { + foreach (var x in CriticalFailureCodes) + if (e.SqlState.StartsWith(x, StringComparison.Ordinal)) + return true; + + // We only treat ProtocolViolation as critical for 
connection + return !clusterError && e.SqlState == ProtocolViolation; + } +} diff --git a/src/Npgsql/PostgresException.cs b/src/Npgsql/PostgresException.cs index b5ebe5b99b..c4ee7ec691 100644 --- a/src/Npgsql/PostgresException.cs +++ b/src/Npgsql/PostgresException.cs @@ -110,6 +110,7 @@ static string GetMessage(string sqlState, string messageText, int position, stri internal static PostgresException Load(NpgsqlReadBuffer buf, bool includeDetail, ILogger exceptionLogger) => new(ErrorOrNoticeMessage.Load(buf, includeDetail, exceptionLogger)); + [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] internal PostgresException(SerializationInfo info, StreamingContext context) : base(info, context) { @@ -140,6 +141,7 @@ internal PostgresException(SerializationInfo info, StreamingContext context) /// /// The to populate with data. /// The destination (see ) for this serialization. + [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] public override void GetObjectData(SerializationInfo info, StreamingContext context) { base.GetObjectData(info, context); @@ -225,6 +227,9 @@ public override bool IsTransient case PostgresErrorCodes.SqlClientUnableToEstablishSqlConnection: case PostgresErrorCodes.SqlServerRejectedEstablishmentOfSqlConnection: case PostgresErrorCodes.TransactionResolutionUnknown: + case PostgresErrorCodes.AdminShutdown: + case PostgresErrorCodes.CrashShutdown: + case PostgresErrorCodes.IdleSessionTimeout: return true; default: return false; @@ -254,22 +259,7 @@ public override bool IsTransient /// Constants are defined in . /// See https://www.postgresql.org/docs/current/static/errcodes-appendix.html /// -#if NET5_0_OR_GREATER public override string SqlState { get; } -#else - public string SqlState { get; } -#endif - - /// - /// The SQLSTATE code for the error. - /// - /// - /// Always present. 
- /// Constants are defined in . - /// See https://www.postgresql.org/docs/current/static/errcodes-appendix.html - /// - [Obsolete("Use SqlState instead")] - public string Code => SqlState; /// /// The primary human-readable error message. This should be accurate but terse. @@ -371,4 +361,4 @@ public override bool IsTransient public string? Routine { get; } #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index 31b2d24f1d..dc4906838b 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -1,11 +1,9 @@ -using System.Collections.Generic; -using System.Linq; -using System.Reflection; +using System.Collections.Generic; using System.Threading.Tasks; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Util; -using NpgsqlTypes; namespace Npgsql; @@ -13,7 +11,7 @@ sealed class PostgresMinimalDatabaseInfoFactory : INpgsqlDatabaseInfoFactory { public Task Load(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) => Task.FromResult( - conn.Settings.ServerCompatibilityMode == ServerCompatibilityMode.NoTypeLoading + !conn.DataSource.Configuration.TypeLoading.LoadTypes ? (NpgsqlDatabaseInfo)new PostgresMinimalDatabaseInfo(conn) : null); } @@ -23,30 +21,104 @@ sealed class PostgresMinimalDatabaseInfo : PostgresDatabaseInfo static PostgresType[]? 
_typesWithMultiranges, _typesWithoutMultiranges; static PostgresType[] CreateTypes(bool withMultiranges) - => typeof(NpgsqlDbType).GetFields() - .Select(f => f.GetCustomAttribute()) - .OfType() - .SelectMany(attr => + { + var types = new List(); + + Add(DataTypeNames.Int2, oid: 21, arrayOid: 1005); + AddWithRange(DataTypeNames.Int4, oid: 23, arrayOid: 1007, + rangeName: DataTypeNames.Int4Range, rangeOid: 3904, rangeArrayOid: 3905, multirangeOid: 4451, multirangeArrayOid: 6150); + Add(DataTypeNames.Int8, oid: 20, arrayOid: 1016); + AddWithRange(DataTypeNames.Int8, oid: 20, arrayOid: 1016, + rangeName: DataTypeNames.Int8Range, rangeOid: 3926, rangeArrayOid: 3927, multirangeOid: 4536, multirangeArrayOid: 6157); + Add(DataTypeNames.Float4, oid: 700, arrayOid: 1021); + Add(DataTypeNames.Float8, oid: 701, arrayOid: 1022); + AddWithRange(DataTypeNames.Numeric, oid: 1700, arrayOid: 1231, + rangeName: DataTypeNames.NumRange, rangeOid: 3906, rangeArrayOid: 3907, multirangeOid: 4532, multirangeArrayOid: 6151); + Add(DataTypeNames.Money, oid: 790, arrayOid: 791); + Add(DataTypeNames.Bool, oid: 16, arrayOid: 1000); + Add(DataTypeNames.Box, oid: 603, arrayOid: 1020); + Add(DataTypeNames.Circle, oid: 718, arrayOid: 719); + Add(DataTypeNames.Line, oid: 628, arrayOid: 629); + Add(DataTypeNames.LSeg, oid: 601, arrayOid: 1018); + Add(DataTypeNames.Path, oid: 602, arrayOid: 1019); + Add(DataTypeNames.Point, oid: 600, arrayOid: 1017); + Add(DataTypeNames.Polygon, oid: 604, arrayOid: 1027); + Add(DataTypeNames.Bpchar, oid: 1042, arrayOid: 1014); + Add(DataTypeNames.Text, oid: 25, arrayOid: 1009); + Add(DataTypeNames.Varchar, oid: 1043, arrayOid: 1015); + Add(DataTypeNames.Name, oid: 19, arrayOid: 1003); + Add(DataTypeNames.Bytea, oid: 17, arrayOid: 1001); + AddWithRange(DataTypeNames.Date, oid: 1082, arrayOid: 1182, + rangeName: DataTypeNames.DateRange, rangeOid: 3912, rangeArrayOid: 3913, multirangeOid: 4535, multirangeArrayOid: 6155); + Add(DataTypeNames.Time, oid: 1083, arrayOid: 
1183); + AddWithRange(DataTypeNames.Timestamp, oid: 1114, arrayOid: 1115, + rangeName: DataTypeNames.TsRange, rangeOid: 3908, rangeArrayOid: 3909, multirangeOid: 4533, multirangeArrayOid: 6152); + AddWithRange(DataTypeNames.TimestampTz, oid: 1184, arrayOid: 1185, + rangeName: DataTypeNames.TsTzRange, rangeOid: 3910, rangeArrayOid: 3911, multirangeOid: 4534, multirangeArrayOid: 6153); + Add(DataTypeNames.Interval, oid: 1186, arrayOid: 1187); + Add(DataTypeNames.TimeTz, oid: 1266, arrayOid: 1270); + Add(DataTypeNames.Inet, oid: 869, arrayOid: 1041); + Add(DataTypeNames.Cidr, oid: 650, arrayOid: 651); + Add(DataTypeNames.MacAddr, oid: 829, arrayOid: 1040); + Add(DataTypeNames.MacAddr8, oid: 774, arrayOid: 775); + Add(DataTypeNames.Bit, oid: 1560, arrayOid: 1561); + Add(DataTypeNames.Varbit, oid: 1562, arrayOid: 1563); + Add(DataTypeNames.TsVector, oid: 3614, arrayOid: 3643); + Add(DataTypeNames.TsQuery, oid: 3615, arrayOid: 3645); + Add(DataTypeNames.RegClass, oid: 2205, arrayOid: 2210); + Add(DataTypeNames.RegCollation, oid: 4191, arrayOid: 4192); + Add(DataTypeNames.RegConfig, oid: 3734, arrayOid: 3735); + Add(DataTypeNames.RegDictionary, oid: 3769, arrayOid: 3770); + Add(DataTypeNames.RegNamespace, oid: 4089, arrayOid: 4090); + Add(DataTypeNames.RegOper, oid: 2203, arrayOid: 2208); + Add(DataTypeNames.RegOperator, oid: 2204, arrayOid: 2209); + Add(DataTypeNames.RegProc, oid: 24, arrayOid: 1008); + Add(DataTypeNames.RegProcedure, oid: 2202, arrayOid: 2207); + Add(DataTypeNames.RegRole, oid: 4096, arrayOid: 4097); + Add(DataTypeNames.Uuid, oid: 2950, arrayOid: 2951); + Add(DataTypeNames.Xml, oid: 142, arrayOid: 143); + Add(DataTypeNames.Json, oid: 114, arrayOid: 199); + Add(DataTypeNames.Jsonb, oid: 3802, arrayOid: 3807); + Add(DataTypeNames.Jsonpath, oid: 4072, arrayOid: 4073); + Add(DataTypeNames.RefCursor, oid: 1790, arrayOid: 2201); + Add(DataTypeNames.OidVector, oid: 30, arrayOid: 1013); + Add(DataTypeNames.Int2Vector, oid: 22, arrayOid: 1006); + 
Add(DataTypeNames.Oid, oid: 26, arrayOid: 1028); + Add(DataTypeNames.Xid, oid: 28, arrayOid: 1011); + Add(DataTypeNames.Xid8, oid: 5069, arrayOid: 271); + Add(DataTypeNames.Cid, oid: 29, arrayOid: 1012); + Add(DataTypeNames.RegType, oid: 2206, arrayOid: 2211); + Add(DataTypeNames.Tid, oid: 27, arrayOid: 1010); + Add(DataTypeNames.PgLsn, oid: 3220, arrayOid: 3221); + Add(DataTypeNames.Unknown, oid: 705, arrayOid: 0); + Add(DataTypeNames.Void, oid: 2278, arrayOid: 0); + + return types.ToArray(); + + void Add(DataTypeName name, uint oid, uint arrayOid) + { + var type = new PostgresBaseType(name, oid); + types.Add(type); + if (arrayOid is not 0) + types.Add(new PostgresArrayType(name.ToArrayName(), arrayOid, type)); + } + + void AddWithRange(DataTypeName name, uint oid, uint arrayOid, DataTypeName rangeName, uint rangeOid, uint rangeArrayOid, uint multirangeOid, uint multirangeArrayOid) + { + var type = new PostgresBaseType(name, oid); + var rangeType = new PostgresRangeType(rangeName, rangeOid, type); + types.Add(type); + types.Add(new PostgresArrayType(name.ToArrayName(), arrayOid, type)); + types.Add(rangeType); + types.Add(new PostgresArrayType(rangeName.ToArrayName(), rangeArrayOid, rangeType)); + if (withMultiranges) { - var baseType = new PostgresBaseType("pg_catalog", attr.Name, attr.BaseOID); - var arrayType = new PostgresArrayType("pg_catalog", "_" + attr.Name, attr.ArrayOID, baseType); - - if (attr.RangeName is null) - { - return new PostgresType[] { baseType, arrayType }; - } - - var rangeType = new PostgresRangeType("pg_catalog", attr.RangeName, attr.RangeOID, baseType); - - return withMultiranges - ? 
new PostgresType[] - { - baseType, arrayType, rangeType, - new PostgresMultirangeType("pg_catalog", attr.MultirangeName!, attr.MultirangeOID, rangeType) - } - : new PostgresType[] { baseType, arrayType, rangeType }; - }) - .ToArray(); + var multirangeType = new PostgresMultirangeType(rangeName.ToDefaultMultirangeName(), multirangeOid, rangeType); + types.Add(multirangeType); + types.Add(new PostgresArrayType(multirangeType.DataTypeName.ToArrayName(), multirangeArrayOid, multirangeType)); + } + } + } protected override IEnumerable GetTypes() => SupportsMultirangeTypes @@ -55,8 +127,26 @@ protected override IEnumerable GetTypes() internal PostgresMinimalDatabaseInfo(NpgsqlConnector conn) : base(conn) + => HasIntegerDateTimes = !conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) || + intDateTimes == "on"; + + // TODO, split database info and type catalog. + internal PostgresMinimalDatabaseInfo() + : base("minimal", 5432, "minimal", "14") + { + } + + static PostgresMinimalDatabaseInfo? _defaultTypeCatalog; + internal static PostgresMinimalDatabaseInfo DefaultTypeCatalog { - HasIntegerDateTimes = !conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) || - intDateTimes == "on"; + get + { + if (_defaultTypeCatalog is not null) + return _defaultTypeCatalog; + + var catalog = new PostgresMinimalDatabaseInfo(); + catalog.ProcessTypes(); + return _defaultTypeCatalog = catalog; + } } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresNotice.cs b/src/Npgsql/PostgresNotice.cs index 62e7b886c6..3b619fafdf 100644 --- a/src/Npgsql/PostgresNotice.cs +++ b/src/Npgsql/PostgresNotice.cs @@ -1,4 +1,4 @@ -using System; +using System; using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; @@ -38,16 +38,6 @@ public sealed class PostgresNotice /// public string SqlState { get; set; } - /// - /// The SQLSTATE code for the error. - /// - /// - /// Always present. 
- /// See https://www.postgresql.org/docs/current/static/errcodes-appendix.html - /// - [Obsolete("Use SqlState instead")] - public string Code => SqlState; - /// /// The primary human-readable error message. This should be accurate but terse. /// @@ -208,7 +198,5 @@ public sealed class NpgsqlNoticeEventArgs : EventArgs public PostgresNotice Notice { get; } internal NpgsqlNoticeEventArgs(PostgresNotice notice) - { - Notice = notice; - } -} \ No newline at end of file + => Notice = notice; +} diff --git a/src/Npgsql/PostgresTypes/PostgresArrayType.cs b/src/Npgsql/PostgresTypes/PostgresArrayType.cs index cfeb89c736..f9ed4e9cd2 100644 --- a/src/Npgsql/PostgresTypes/PostgresArrayType.cs +++ b/src/Npgsql/PostgresTypes/PostgresArrayType.cs @@ -1,4 +1,4 @@ -using System.Diagnostics; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -18,10 +18,19 @@ public class PostgresArrayType : PostgresType /// /// Constructs a representation of a PostgreSQL array data type. /// - protected internal PostgresArrayType(string ns, string internalName, uint oid, PostgresType elementPostgresType) - : base(ns, elementPostgresType.Name + "[]", internalName, oid) + protected internal PostgresArrayType(string ns, string name, uint oid, PostgresType elementPostgresType) + : base(ns, name, oid) + { + Element = elementPostgresType; + Element.Array = this; + } + + /// + /// Constructs a representation of a PostgreSQL array data type. 
+ /// + internal PostgresArrayType(DataTypeName dataTypeName, Oid oid, PostgresType elementPostgresType) + : base(dataTypeName, oid) { - Debug.Assert(internalName == '_' + elementPostgresType.InternalName); Element = elementPostgresType; Element.Array = this; } @@ -34,4 +43,4 @@ internal override string GetPartialNameWithFacets(int typeModifier) internal override PostgresFacets GetFacets(int typeModifier) => Element.GetFacets(typeModifier); -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresBaseType.cs b/src/Npgsql/PostgresTypes/PostgresBaseType.cs index de9a7bc13e..141abf4064 100644 --- a/src/Npgsql/PostgresTypes/PostgresBaseType.cs +++ b/src/Npgsql/PostgresTypes/PostgresBaseType.cs @@ -1,4 +1,6 @@ - + +using Npgsql.Internal.Postgres; + namespace Npgsql.PostgresTypes; /// @@ -6,10 +8,17 @@ namespace Npgsql.PostgresTypes; /// public class PostgresBaseType : PostgresType { - /// - protected internal PostgresBaseType(string ns, string internalName, uint oid) - : base(ns, TranslateInternalName(internalName), internalName, oid) - {} + /// + /// Constructs a representation of a PostgreSQL base data type. + /// + protected internal PostgresBaseType(string ns, string name, uint oid) + : base(ns, name, oid) {} + + /// + /// Constructs a representation of a PostgreSQL base data type. + /// + internal PostgresBaseType(DataTypeName dataTypeName, Oid oid) + : base(dataTypeName, oid) {} /// internal override string GetPartialNameWithFacets(int typeModifier) @@ -68,27 +77,4 @@ internal override PostgresFacets GetFacets(int typeModifier) return PostgresFacets.None; } } - - // The type names returned by PostgreSQL are internal names (int4 instead of - // integer). We perform translation to the user-facing standard names. 
- // https://www.postgresql.org/docs/current/static/datatype.html#DATATYPE-TABLE - static string TranslateInternalName(string internalName) - => internalName switch - { - "bool" => "boolean", - "bpchar" => "character", - "decimal" => "numeric", - "float4" => "real", - "float8" => "double precision", - "int2" => "smallint", - "int4" => "integer", - "int8" => "bigint", - "time" => "time without time zone", - "timestamp" => "timestamp without time zone", - "timetz" => "time with time zone", - "timestamptz" => "timestamp with time zone", - "varbit" => "bit varying", - "varchar" => "character varying", - _ => internalName - }; -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs index fb31254900..19942ea16e 100644 --- a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs @@ -1,4 +1,5 @@ -using System.Collections.Generic; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -15,15 +16,19 @@ public class PostgresCompositeType : PostgresType /// public IReadOnlyList Fields => MutableFields; - internal List MutableFields { get; } = new(); + internal List MutableFields { get; } = []; /// /// Constructs a representation of a PostgreSQL array data type. /// -#pragma warning disable CA2222 // Do not decrease inherited member visibility internal PostgresCompositeType(string ns, string name, uint oid) : base(ns, name, oid) {} -#pragma warning restore CA2222 // Do not decrease inherited member visibility + + /// + /// Constructs a representation of a PostgreSQL domain data type. + /// + internal PostgresCompositeType(DataTypeName dataTypeName, Oid oid) + : base(dataTypeName, oid) {} /// /// Represents a field in a PostgreSQL composite data type. 
@@ -48,4 +53,4 @@ internal Field(string name, PostgresType type) /// public override string ToString() => $"{Name} => {Type}"; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresDomainType.cs b/src/Npgsql/PostgresTypes/PostgresDomainType.cs index f9b504b0ca..89ce0350d9 100644 --- a/src/Npgsql/PostgresTypes/PostgresDomainType.cs +++ b/src/Npgsql/PostgresTypes/PostgresDomainType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL domain type. @@ -33,6 +35,16 @@ protected internal PostgresDomainType(string ns, string name, uint oid, Postgres NotNull = notNull; } + /// + /// Constructs a representation of a PostgreSQL domain data type. + /// + internal PostgresDomainType(DataTypeName dataTypeName, Oid oid, PostgresType baseType, bool notNull) + : base(dataTypeName, oid) + { + BaseType = baseType; + NotNull = notNull; + } + internal override PostgresFacets GetFacets(int typeModifier) => BaseType.GetFacets(typeModifier); -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresEnumType.cs b/src/Npgsql/PostgresTypes/PostgresEnumType.cs index f456946070..eb6a1c8f33 100644 --- a/src/Npgsql/PostgresTypes/PostgresEnumType.cs +++ b/src/Npgsql/PostgresTypes/PostgresEnumType.cs @@ -1,4 +1,5 @@ -using System.Collections.Generic; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -15,12 +16,18 @@ public class PostgresEnumType : PostgresType /// public IReadOnlyList Labels => MutableLabels; - internal List MutableLabels { get; } = new(); + internal List MutableLabels { get; } = []; /// /// Constructs a representation of a PostgreSQL enum data type. /// protected internal PostgresEnumType(string ns, string name, uint oid) - : base(ns, name, oid) - {} -} \ No newline at end of file + : base(ns, name, oid) {} + + /// + /// Constructs a representation of a PostgreSQL enum data type. 
+ /// + internal PostgresEnumType(DataTypeName dataTypeName, Oid oid) + : base(dataTypeName, oid) {} + +} diff --git a/src/Npgsql/PostgresTypes/PostgresFacets.cs b/src/Npgsql/PostgresTypes/PostgresFacets.cs index 4c88724965..14672ed19e 100644 --- a/src/Npgsql/PostgresTypes/PostgresFacets.cs +++ b/src/Npgsql/PostgresTypes/PostgresFacets.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Text; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs index 3d35783263..e22b836c51 100644 --- a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL multirange data type. @@ -15,7 +17,7 @@ public class PostgresMultirangeType : PostgresType public PostgresRangeType Subrange { get; } /// - /// Constructs a representation of a PostgreSQL range data type. + /// Constructs a representation of a PostgreSQL multirange data type. /// protected internal PostgresMultirangeType(string ns, string name, uint oid, PostgresRangeType rangePostgresType) : base(ns, name, oid) @@ -23,4 +25,14 @@ protected internal PostgresMultirangeType(string ns, string name, uint oid, Post Subrange = rangePostgresType; Subrange.Multirange = this; } -} \ No newline at end of file + + /// + /// Constructs a representation of a PostgreSQL multirange data type. 
+ /// + internal PostgresMultirangeType(DataTypeName dataTypeName, Oid oid, PostgresRangeType rangePostgresType) + : base(dataTypeName, oid) + { + Subrange = rangePostgresType; + Subrange.Multirange = this; + } +} diff --git a/src/Npgsql/PostgresTypes/PostgresRangeType.cs b/src/Npgsql/PostgresTypes/PostgresRangeType.cs index bc981bd998..cd07a46936 100644 --- a/src/Npgsql/PostgresTypes/PostgresRangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresRangeType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL range data type. @@ -28,4 +30,14 @@ protected internal PostgresRangeType( Subtype = subtypePostgresType; Subtype.Range = this; } -} \ No newline at end of file + + /// + /// Constructs a representation of a PostgreSQL range data type. + /// + internal PostgresRangeType(DataTypeName dataTypeName, Oid oid, PostgresType subtypePostgresType) + : base(dataTypeName, oid) + { + Subtype = subtypePostgresType; + Subtype.Range = this; + } +} diff --git a/src/Npgsql/PostgresTypes/PostgresType.cs b/src/Npgsql/PostgresTypes/PostgresType.cs index 543cf3dcfd..f6e964b368 100644 --- a/src/Npgsql/PostgresTypes/PostgresType.cs +++ b/src/Npgsql/PostgresTypes/PostgresType.cs @@ -1,5 +1,6 @@ -using System; -using System.Linq; +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -20,25 +21,23 @@ public abstract class PostgresType /// Constructs a representation of a PostgreSQL data type. /// /// The data type's namespace (or schema). - /// The data type's name. + /// The data type's display name. /// The data type's OID. - protected PostgresType(string ns, string name, uint oid) - : this(ns, name, name, oid) {} + private protected PostgresType(string ns, string name, uint oid) + { + DataTypeName = DataTypeName.FromDisplayName(ns is null or "pg_catalog" ? name : ns + "." 
+ name); + OID = oid; + } /// /// Constructs a representation of a PostgreSQL data type. /// - /// The data type's namespace (or schema). - /// The data type's name. - /// The data type's internal name (e.g. _int4 for integer[]). + /// The data type's fully qualified name. /// The data type's OID. - protected PostgresType(string ns, string name, string internalName, uint oid) + private protected PostgresType(DataTypeName dataTypeName, Oid oid) { - Namespace = ns; - Name = name; - FullName = Namespace + '.' + Name; - InternalName = internalName; - OID = oid; + DataTypeName = dataTypeName; + OID = oid.Value; } #endregion @@ -53,7 +52,7 @@ protected PostgresType(string ns, string name, string internalName, uint oid) /// /// The data type's namespace (or schema). /// - public string Namespace { get; } + public string Namespace => DataTypeName.Schema; /// /// The data type's name. @@ -62,24 +61,27 @@ protected PostgresType(string ns, string name, string internalName, uint oid) /// Note that this is the standard, user-displayable type name (e.g. integer[]) rather than the internal /// PostgreSQL name as it is in pg_type (_int4). See for the latter. /// - public string Name { get; } + public string Name => DataTypeName.UnqualifiedDisplayName; /// /// The full name of the backend type, including its namespace. /// - public string FullName { get; } + [field: MaybeNull] + public string FullName => field ??= Namespace + "." + Name; + + internal DataTypeName DataTypeName { get; } /// /// A display name for this backend type, including the namespace unless it is pg_catalog (the namespace /// for all built-in types). /// - public string DisplayName => Namespace == "pg_catalog" ? Name : FullName; + public string DisplayName => DataTypeName.DisplayName; /// /// The data type's internal PostgreSQL name (e.g. _int4 not integer[]). /// See for a more user-friendly name. 
/// - public string InternalName { get; } + public string InternalName => DataTypeName.UnqualifiedName; /// /// If a PostgreSQL array type exists for this type, it will be referenced here. @@ -111,4 +113,21 @@ internal string GetDisplayNameWithFacets(int typeModifier) /// Returns a string that represents the current object. /// public override string ToString() => DisplayName; -} \ No newline at end of file + + PostgresType? _representationalType; + + /// Canonizes (nested) domain types to underlying types, does not handle composites. + internal PostgresType GetRepresentationalType() + { + return _representationalType ??= Core(this) ?? throw new InvalidOperationException("Couldn't map type to representational type"); + + static PostgresType? Core(PostgresType? postgresType) + => (postgresType as PostgresDomainType)?.BaseType ?? postgresType switch + { + PostgresArrayType { Element: PostgresDomainType domain } => Core(domain.BaseType)?.Array, + PostgresMultirangeType { Subrange.Subtype: PostgresDomainType domain } => domain.BaseType.Range?.Multirange, + PostgresRangeType { Subtype: PostgresDomainType domain } => domain.Range, + var type => type + }; + } +} diff --git a/src/Npgsql/PostgresTypes/PostgresTypeKind.cs b/src/Npgsql/PostgresTypes/PostgresTypeKind.cs new file mode 100644 index 0000000000..03330f9050 --- /dev/null +++ b/src/Npgsql/PostgresTypes/PostgresTypeKind.cs @@ -0,0 +1,21 @@ +namespace Npgsql.PostgresTypes; + +enum PostgresTypeKind +{ + /// A base type. + Base, + /// An enum carrying its variants. + Enum, + /// A pseudo type like anyarray. + Pseudo, + // An array carrying its element type. + Array, + // A range carrying its element type. + Range, + // A multi-range carrying its element type. + Multirange, + // A domain carrying its underlying type. + Domain, + // A composite carrying its constituent fields. 
+ Composite +} diff --git a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs index a520df9696..fa9bf74c54 100644 --- a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs +++ b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs @@ -1,16 +1,16 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL data type that isn't known to Npgsql and cannot be handled. /// -public class UnknownBackendType : PostgresType +public sealed class UnknownBackendType : PostgresType { internal static readonly PostgresType Instance = new UnknownBackendType(); /// /// Constructs a the unknown backend type. /// -#pragma warning disable CA2222 // Do not decrease inherited member visibility - UnknownBackendType() : base("", "", 0) { } -#pragma warning restore CA2222 // Do not decrease inherited member visibility -} \ No newline at end of file + UnknownBackendType() : base(DataTypeName.Unspecified,0) { } +} diff --git a/src/Npgsql/PregeneratedMessages.cs b/src/Npgsql/PregeneratedMessages.cs index 3d315ff12f..b1812360e6 100644 --- a/src/Npgsql/PregeneratedMessages.cs +++ b/src/Npgsql/PregeneratedMessages.cs @@ -1,8 +1,7 @@ -using System.Diagnostics; using System.IO; -using System.Linq; using System.Text; using Npgsql.Internal; +using Npgsql.Util; namespace Npgsql; @@ -10,11 +9,9 @@ static class PregeneratedMessages { static PregeneratedMessages() { -#pragma warning disable CS8625 // This is the only use of a write buffer without a connector, for in-memory construction of // pregenerated messages. 
using var buf = new NpgsqlWriteBuffer(null, new MemoryStream(), null, NpgsqlWriteBuffer.MinimumSize, Encoding.ASCII); -#pragma warning restore CS8625 BeginTransRepeatableRead = Generate(buf, "BEGIN ISOLATION LEVEL REPEATABLE READ"); BeginTransSerializable = Generate(buf, "BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE"); @@ -27,9 +24,9 @@ static PregeneratedMessages() internal static byte[] Generate(NpgsqlWriteBuffer buf, string query) { - Debug.Assert(query.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(query); - var queryByteLen = Encoding.ASCII.GetByteCount(query); + var queryByteLen = buf.TextEncoding.GetByteCount(query); buf.WriteByte(FrontendMessageCode.Query); buf.WriteInt32(4 + // Message length (including self excluding code) @@ -52,4 +49,4 @@ internal static byte[] Generate(NpgsqlWriteBuffer buf, string query) internal static readonly byte[] RollbackTransaction; internal static readonly byte[] DiscardAll; -} \ No newline at end of file +} diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index 015adc5dd3..2f63b182e7 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -1,7 +1,9 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; +using System.Text; using Npgsql.BackendMessages; +using Npgsql.Internal.Postgres; namespace Npgsql; @@ -16,7 +18,7 @@ sealed class PreparedStatement internal string Sql { get; } - internal string? Name; + internal byte[]? Name; internal RowDescriptionMessage? Description; @@ -24,7 +26,8 @@ sealed class PreparedStatement internal PreparedState State { get; set; } - internal bool IsPrepared => State == PreparedState.Prepared; + // Invalidated statement is still prepared and allocated on PG's side + internal bool IsPrepared => State is PreparedState.Prepared or PreparedState.Invalidated; /// /// If true, the user explicitly requested this statement be prepared. 
It does not get closed as part of @@ -40,15 +43,15 @@ sealed class PreparedStatement internal int AutoPreparedSlotIndex { get; set; } - internal DateTime LastUsed { get; set; } + internal long LastUsed { get; set; } + + internal void RefreshLastUsed() => LastUsed = Stopwatch.GetTimestamp(); /// /// Contains the handler types for a prepared statement's parameters, for overloaded cases (same SQL, different param types) /// Only populated after the statement has been prepared (i.e. null for candidates). /// - internal Type[]? HandlerParamTypes { get; private set; } - - static readonly Type[] EmptyParamTypes = Type.EmptyTypes; + PgTypeId[]? ConverterParamTypes { get; set; } internal static PreparedStatement CreateExplicit( PreparedStatementManager manager, @@ -59,7 +62,7 @@ internal static PreparedStatement CreateExplicit( { var pStatement = new PreparedStatement(manager, sql, true) { - Name = name, + Name = Encoding.ASCII.GetBytes(name), StatementBeingReplaced = statementBeingReplaced }; pStatement.SetParamTypes(parameters); @@ -81,22 +84,23 @@ internal void SetParamTypes(List parameters) { if (parameters.Count == 0) { - HandlerParamTypes = EmptyParamTypes; + ConverterParamTypes = []; return; } - HandlerParamTypes = new Type[parameters.Count]; + ConverterParamTypes = new PgTypeId[parameters.Count]; for (var i = 0; i < parameters.Count; i++) - HandlerParamTypes[i] = parameters[i].Handler!.GetType(); + ConverterParamTypes[i] = parameters[i].PgTypeId; } internal bool DoParametersMatch(List parameters) { - if (HandlerParamTypes!.Length != parameters.Count) + var paramTypes = ConverterParamTypes!; + if (paramTypes.Length != parameters.Count) return false; - for (var i = 0; i < HandlerParamTypes.Length; i++) - if (HandlerParamTypes[i] != parameters[i].Handler!.GetType()) + for (var i = 0; i < paramTypes.Length; i++) + if (paramTypes[i] != parameters[i].PgTypeId) return false; return true; @@ -170,4 +174,4 @@ enum PreparedState /// The statement was invalidated because e.g. 
table schema has changed since preparation. /// Invalidated -} \ No newline at end of file +} diff --git a/src/Npgsql/PreparedStatementManager.cs b/src/Npgsql/PreparedStatementManager.cs index c7f18c52e5..57c4c90af9 100644 --- a/src/Npgsql/PreparedStatementManager.cs +++ b/src/Npgsql/PreparedStatementManager.cs @@ -1,6 +1,7 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; +using System.Text; using Microsoft.Extensions.Logging; using Npgsql.Internal; @@ -60,12 +61,13 @@ internal PreparedStatementManager(NpgsqlConnector connector) if (BySql.TryGetValue(sql, out var pStatement)) { Debug.Assert(pStatement.State != PreparedState.Unprepared); - if (pStatement.IsExplicit) + // If statement is invalidated, fall through below where we replace it with another + if (pStatement.IsExplicit && pStatement.State != PreparedState.Invalidated) { // Great, we've found an explicit prepared statement. // We just need to check that the parameter types correspond, since prepared statements are // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. - return pStatement.DoParametersMatch(batchCommand.PositionalParameters!) + return pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly) ? pStatement : null; } @@ -77,8 +79,10 @@ internal PreparedStatementManager(NpgsqlConnector connector) // Found a candidate for autopreparation. Remove it and prepare explicitly. RemoveCandidate(pStatement); break; + // The statement is invalidated. Just replace it with a new one. + case PreparedState.Invalidated: + // The statement has already been autoprepared. We need to "promote" it to explicit. case PreparedState.Prepared: - // The statement has already been autoprepared. We need to "promote" it to explicit. 
statementBeingReplaced = pStatement; break; case PreparedState.Unprepared: @@ -89,167 +93,186 @@ internal PreparedStatementManager(NpgsqlConnector connector) } // Statement hasn't been prepared yet - return BySql[sql] = PreparedStatement.CreateExplicit(this, sql, NextPreparedStatementName(), batchCommand.PositionalParameters, statementBeingReplaced); + return BySql[sql] = PreparedStatement.CreateExplicit(this, sql, NextPreparedStatementName(), batchCommand.CurrentParametersReadOnly, statementBeingReplaced); } internal PreparedStatement? TryGetAutoPrepared(NpgsqlBatchCommand batchCommand) { var sql = batchCommand.FinalCommandText!; - if (!BySql.TryGetValue(sql, out var pStatement)) + // We could also test for PreparedState.BeingPrepared as it's handled the exact same way as PreparedState.Prepared + // But since it's so rare we'll just go through the slow path + if (!BySql.TryGetValue(sql, out var pStatement) || pStatement.State != PreparedState.Prepared) + return TryGetAutoPreparedSlow(batchCommand, pStatement); + + // The statement has already been prepared (explicitly or automatically) + // We just need to check that the parameter types correspond, since prepared statements are + // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. + if (!pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly)) + return null; + // Prevent this statement from being replaced within this batch + pStatement.LastUsed = long.MaxValue; + return pStatement; + + PreparedStatement? TryGetAutoPreparedSlow(NpgsqlBatchCommand batchCommand, PreparedStatement? pStatement) { - // New candidate. Find an empty candidate slot or eject a least-used one. 
- int slotIndex = -1, leastUsages = int.MaxValue; - var lastUsed = DateTime.MaxValue; - for (var i = 0; i < _candidates.Length; i++) + var sql = batchCommand.FinalCommandText!; + if (pStatement is null) { - var candidate = _candidates[i]; - // ReSharper disable once ConditionIsAlwaysTrueOrFalse - // ReSharper disable HeuristicUnreachableCode - if (candidate == null) // Found an unused candidate slot, return immediately - { - slotIndex = i; - break; - } - // ReSharper restore HeuristicUnreachableCode - if (candidate.Usages < leastUsages) - { - leastUsages = candidate.Usages; - slotIndex = i; - lastUsed = candidate.LastUsed; - } - else if (candidate.Usages == leastUsages && candidate.LastUsed < lastUsed) + // New candidate. Find an empty candidate slot or eject a least-used one. + int slotIndex = -1, leastUsages = int.MaxValue; + var lastUsed = long.MaxValue; + for (var i = 0; i < _candidates.Length; i++) { - slotIndex = i; - lastUsed = candidate.LastUsed; + var candidate = _candidates[i]; + // ReSharper disable once ConditionIsAlwaysTrueOrFalse + // ReSharper disable HeuristicUnreachableCode + if (candidate == null) // Found an unused candidate slot, return immediately + { + slotIndex = i; + break; + } + // ReSharper restore HeuristicUnreachableCode + if (candidate.Usages < leastUsages) + { + leastUsages = candidate.Usages; + slotIndex = i; + lastUsed = candidate.LastUsed; + } + else if (candidate.Usages == leastUsages && candidate.LastUsed < lastUsed) + { + slotIndex = i; + lastUsed = candidate.LastUsed; + } } + + var leastUsed = _candidates[slotIndex]; + // ReSharper disable once ConditionIsAlwaysTrueOrFalse + if (leastUsed != null) + BySql.Remove(leastUsed.Sql); + pStatement = BySql[sql] = _candidates[slotIndex] = PreparedStatement.CreateAutoPrepareCandidate(this, sql); } - var leastUsed = _candidates[slotIndex]; - // ReSharper disable once ConditionIsAlwaysTrueOrFalse - if (leastUsed != null) - BySql.Remove(leastUsed.Sql); - pStatement = BySql[sql] = 
_candidates[slotIndex] = PreparedStatement.CreateAutoPrepareCandidate(this, sql); - } + switch (pStatement.State) + { + case PreparedState.NotPrepared: + case PreparedState.Invalidated: + break; - switch (pStatement.State) - { - case PreparedState.NotPrepared: - case PreparedState.Invalidated: - break; - - case PreparedState.Prepared: - case PreparedState.BeingPrepared: - // The statement has already been prepared (explicitly or automatically), or has been selected - // for preparation (earlier identical statement in the same command). - // We just need to check that the parameter types correspond, since prepared statements are - // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. - if (!pStatement.DoParametersMatch(batchCommand.PositionalParameters)) + // We shouldn't ever get PreparedState.Prepared since it's handled above but handle it here just in case + case PreparedState.Prepared: + case PreparedState.BeingPrepared: + // The statement has already been prepared (explicitly or automatically), or has been selected + // for preparation (earlier identical statement in the same command). + // We just need to check that the parameter types correspond, since prepared statements are + // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. + if (!pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly)) + return null; + // Prevent this statement from being replaced within this batch + pStatement.LastUsed = long.MaxValue; + return pStatement; + + case PreparedState.BeingUnprepared: + // The statement is being replaced by an earlier statement in this same batch. return null; - // Prevent this statement from being replaced within this batch - pStatement.LastUsed = DateTime.MaxValue; - return pStatement; - - case PreparedState.BeingUnprepared: - // The statement is being replaced by an earlier statement in this same batch. 
- return null; - - default: - Debug.Fail($"Unexpected {nameof(PreparedState)} in auto-preparation: {pStatement.State}"); - break; - } - if (++pStatement.Usages < UsagesBeforePrepare) - { - // Statement still hasn't passed the usage threshold, no automatic preparation. - // Return null for unprepared execution. - pStatement.LastUsed = DateTime.UtcNow; - return null; - } - - // Bingo, we've just passed the usage threshold, statement should get prepared - LogMessages.AutoPreparingStatement(_commandLogger, sql, _connector.Id); - - // Look for either an empty autoprepare slot, or the least recently used prepared statement which we'll replace it. - var oldestTimestamp = DateTime.MaxValue; - var selectedIndex = -1; - for (var i = 0; i < AutoPrepared.Length; i++) - { - var slot = AutoPrepared[i]; + default: + Debug.Fail($"Unexpected {nameof(PreparedState)} in auto-preparation: {pStatement.State}"); + break; + } - if (slot is null or { State: PreparedState.Invalidated }) + if (++pStatement.Usages < UsagesBeforePrepare) { - // We found a free or invalidated slot, exit the loop immediately - selectedIndex = i; - break; + // Statement still hasn't passed the usage threshold, no automatic preparation. + // Return null for unprepared execution. + pStatement.RefreshLastUsed(); + return null; } - switch (slot.State) + // Bingo, we've just passed the usage threshold, statement should get prepared + LogMessages.AutoPreparingStatement(_commandLogger, sql, _connector.Id); + + // Look for either an empty autoprepare slot, or the least recently used prepared statement which we'll replace it. 
+ var oldestLastUsed = long.MaxValue; + var selectedIndex = -1; + for (var i = 0; i < AutoPrepared.Length; i++) { - case PreparedState.Prepared: - if (slot.LastUsed < oldestTimestamp) + var slot = AutoPrepared[i]; + + if (slot is null or { State: PreparedState.Invalidated }) { + // We found a free or invalidated slot, exit the loop immediately selectedIndex = i; - oldestTimestamp = slot.LastUsed; + break; } - break; - case PreparedState.BeingPrepared: - // Slot has already been selected for preparation by an earlier statement in this batch. Skip it. - continue; + switch (slot.State) + { + case PreparedState.Prepared: + if (slot.LastUsed < oldestLastUsed) + { + selectedIndex = i; + oldestLastUsed = slot.LastUsed; + } + break; - default: - throw new Exception( - $"Invalid {nameof(PreparedState)} state {slot.State} encountered when scanning prepared statement slots"); + case PreparedState.BeingPrepared: + // Slot has already been selected for preparation by an earlier statement in this batch. Skip it. + continue; + + default: + ThrowHelper.ThrowInvalidOperationException($"Invalid {nameof(PreparedState)} state {slot.State} encountered when scanning prepared statement slots"); + return null; + } } - } - if (selectedIndex == -1) - { - // We're here if we couldn't find a free slot or a prepared statement to replace - this means all slots are taken by - // statements being prepared in this batch. - return null; - } + if (selectedIndex < 0) + { + // We're here if we couldn't find a free slot or a prepared statement to replace - this means all slots are taken by + // statements being prepared in this batch. 
+ return null; + } - if (pStatement.State != PreparedState.Invalidated) - RemoveCandidate(pStatement); + if (pStatement.State != PreparedState.Invalidated) + RemoveCandidate(pStatement); - var oldPreparedStatement = AutoPrepared[selectedIndex]; + var oldPreparedStatement = AutoPrepared[selectedIndex]; - if (oldPreparedStatement is null) - { - pStatement.Name = "_auto" + selectedIndex; - } - else - { - // When executing an invalidated prepared statement, the old and the new statements are the same instance. - // Create a copy so that we have two distinct instances with their own states. - if (oldPreparedStatement == pStatement) + if (oldPreparedStatement is null) { - oldPreparedStatement = new PreparedStatement(this, oldPreparedStatement.Sql, isExplicit: false) - { - Name = oldPreparedStatement.Name - }; + pStatement.Name = Encoding.ASCII.GetBytes("_auto" + selectedIndex); } + else + { + // When executing an invalidated prepared statement, the old and the new statements are the same instance. + // Create a copy so that we have two distinct instances with their own states. + if (oldPreparedStatement == pStatement) + { + oldPreparedStatement = new PreparedStatement(this, oldPreparedStatement.Sql, isExplicit: false) + { + Name = oldPreparedStatement.Name + }; + } - pStatement.Name = oldPreparedStatement.Name; - pStatement.State = PreparedState.NotPrepared; - pStatement.StatementBeingReplaced = oldPreparedStatement; - oldPreparedStatement.State = PreparedState.BeingUnprepared; - } + pStatement.Name = oldPreparedStatement.Name; + pStatement.State = PreparedState.NotPrepared; + pStatement.StatementBeingReplaced = oldPreparedStatement; + oldPreparedStatement.State = PreparedState.BeingUnprepared; + } - pStatement.AutoPreparedSlotIndex = selectedIndex; - AutoPrepared[selectedIndex] = pStatement; + pStatement.AutoPreparedSlotIndex = selectedIndex; + AutoPrepared[selectedIndex] = pStatement; - // Make sure this statement isn't replaced by a later statement in the same batch. 
- pStatement.LastUsed = DateTime.MaxValue; + // Make sure this statement isn't replaced by a later statement in the same batch. + pStatement.LastUsed = long.MaxValue; - // Note that the parameter types are only set at the moment of preparation - in the candidate phase - // there's no differentiation between overloaded statements, which are a pretty rare case, saving - // allocations. - pStatement.SetParamTypes(batchCommand.PositionalParameters); + // Note that the parameter types are only set at the moment of preparation - in the candidate phase + // there's no differentiation between overloaded statements, which are a pretty rare case, saving + // allocations. + pStatement.SetParamTypes(batchCommand.CurrentParametersReadOnly); - return pStatement; + return pStatement; + } } void RemoveCandidate(PreparedStatement candidate) @@ -278,4 +301,4 @@ internal void ClearAll() for (var i = 0; i < _candidates.Length; i++) _candidates[i] = null; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 145af2037a..d9f3dd06b4 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -1,23 +1,20 @@ -using System; +using System; using System.IO; using System.Threading; using System.Threading.Tasks; -using Npgsql.Internal; namespace Npgsql; sealed class PreparedTextReader : TextReader { string _str = null!; - NpgsqlReadBuffer.ColumnStream _stream = null!; int _position; bool _disposed; - public void Init(string str, NpgsqlReadBuffer.ColumnStream stream) + public void Init(string str) { _str = str; - _stream = stream; _disposed = false; _position = 0; } @@ -27,7 +24,7 @@ public void Init(string str, NpgsqlReadBuffer.ColumnStream stream) public override int Peek() { CheckDisposed(); - + return _position < _str.Length ? _str[_position] : -1; @@ -36,17 +33,13 @@ public override int Peek() public override int Read() { CheckDisposed(); - + return _position < _str.Length ? 
_str[_position++] : -1; } -#if NETSTANDARD2_0 - public int Read(Span buffer) -#else public override int Read(Span buffer) -#endif { CheckDisposed(); @@ -61,17 +54,12 @@ public override int Read(Span buffer) public override int Read(char[] buffer, int index, int count) { - if (buffer == null) - { - throw new ArgumentNullException(nameof(buffer)); - } - if (index < 0 || count < 0) - { - throw new ArgumentOutOfRangeException(index < 0 ? nameof(index) : nameof(count)); - } + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(index); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - index < count) { - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } return Read(buffer.AsSpan(index, count)); @@ -80,18 +68,14 @@ public override int Read(char[] buffer, int index, int count) public override Task ReadAsync(char[] buffer, int index, int count) => Task.FromResult(Read(buffer, index, count)); - public -#if !NETSTANDARD2_0 - override -#endif - ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) => new(Read(buffer.Span)); + public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) => new(Read(buffer.Span)); public override Task ReadLineAsync() => Task.FromResult(ReadLine()); public override string ReadToEnd() { CheckDisposed(); - + if (_position == _str.Length) return string.Empty; @@ -103,19 +87,19 @@ public override string ReadToEnd() public override Task ReadToEndAsync() => Task.FromResult(ReadToEnd()); void CheckDisposed() + => ObjectDisposedException.ThrowIf(_disposed, this); + + public void Restart() { - if (_disposed || 
_stream.IsDisposed) - throw new ObjectDisposedException(null); + CheckDisposed(); + _position = 0; } protected override void Dispose(bool disposing) { - base.Dispose(disposing); - if (disposing) - { _disposed = true; - _stream.Dispose(); - } + + base.Dispose(disposing); } } diff --git a/src/Npgsql/Properties/AssemblyInfo.cs b/src/Npgsql/Properties/AssemblyInfo.cs index e71a69a9dd..eab391f2c8 100644 --- a/src/Npgsql/Properties/AssemblyInfo.cs +++ b/src/Npgsql/Properties/AssemblyInfo.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Runtime.CompilerServices; using System.Reflection; using System.Security; @@ -8,23 +8,7 @@ [assembly: AssemblyTrademark("")] [assembly: SecurityRules(SecurityRuleSet.Level1)] -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif - -[assembly: InternalsVisibleTo("EntityFramework6.Npgsql, PublicKey=" + -"0024000004800000940000000602000000240000525341310004000001000100" + -"2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + -"8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + -"7aa16153bcea2ae9a471145624826f60d7c8e71cd025b554a0177bd935a78096" + -"29f0a7afc778ebb4ad033e1bf512c1a9c6ceea26b077bc46cac93800435e77ee")] - -[assembly: InternalsVisibleTo("EntityFramework5.Npgsql, PublicKey=" + -"0024000004800000940000000602000000240000525341310004000001000100" + -"2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + -"8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + -"7aa16153bcea2ae9a471145624826f60d7c8e71cd025b554a0177bd935a78096" + -"29f0a7afc778ebb4ad033e1bf512c1a9c6ceea26b077bc46cac93800435e77ee")] [assembly: InternalsVisibleTo("Npgsql.Tests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + @@ -33,7 +17,7 @@ "7aa16153bcea2ae9a471145624826f60d7c8e71cd025b554a0177bd935a78096" + "29f0a7afc778ebb4ad033e1bf512c1a9c6ceea26b077bc46cac93800435e77ee")] -[assembly: InternalsVisibleTo("Npgsql.NodaTime.Tests, PublicKey=" + +[assembly: 
InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + "2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + "8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index ab4bb536bf..130ac94f8e 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -1,4 +1,4 @@ -//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ // // This code was generated by a tool. // @@ -11,32 +11,46 @@ namespace Npgsql.Properties { using System; - [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [System.Diagnostics.DebuggerNonUserCodeAttribute()] - [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. 
+ [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlStrings { - private static System.Resources.ResourceManager resourceMan; + private static global::System.Resources.ResourceManager resourceMan; - private static System.Globalization.CultureInfo resourceCulture; + private static global::System.Globalization.CultureInfo resourceCulture; - [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlStrings() { } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Resources.ResourceManager ResourceManager { + /// + /// Returns the cached ResourceManager instance used by this class. 
+ /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { get { - if (object.Equals(null, resourceMan)) { - System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); resourceMan = temp; } return resourceMan; } } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Globalization.CultureInfo Culture { + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -45,94 +59,276 @@ internal static System.Globalization.CultureInfo Culture { } } - internal static string CannotUseSslVerifyWithUserCallback { + /// + /// Looks up a localized string similar to '{0}' must be positive.. + /// + internal static string ArgumentMustBePositive { get { - return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); + return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); } } - internal static string CannotUseSslRootCertificateWithUserCallback { + /// + /// Looks up a localized string similar to Arrays aren't enabled; please call {0} on {1} to enable arrays.. 
+ /// + internal static string ArraysNotEnabled { get { - return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); + return ResourceManager.GetString("ArraysNotEnabled", resourceCulture); } } - internal static string CannotUseSslModeRequireWithoutTrustServerCertificate { + /// + /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. + /// + internal static string CannotReadInfinityValue { get { - return ResourceManager.GetString("CannotUseSslModeRequireWithoutTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } - internal static string CannotUseTrustServerCertificate { + /// + /// Looks up a localized string similar to Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.. + /// + internal static string CannotReadIntervalWithMonthsAsTimeSpan { get { - return ResourceManager.GetString("CannotUseTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); } } - internal static string NoMultirangeTypeFound { + /// + /// Looks up a localized string similar to When registering a password provider, a password or password file may not be set.. 
+ /// + internal static string CannotSetBothPasswordProviderAndPassword { get { - return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); } } - internal static string NotSupportedOnDataSourceCommand { + /// + /// Looks up a localized string similar to Multiple kinds of password providers were found, only one kind may be configured per DbDataSource.. + /// + internal static string CannotSetMultiplePasswordProviderKinds { get { - return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + return ResourceManager.GetString("CannotSetMultiplePasswordProviderKinds", resourceCulture); } } - internal static string NotSupportedOnDataSourceBatch { + /// + /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseSslRootCertificateWithCustomValidationCallback { get { - return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); + return ResourceManager.GetString("CannotUseSslRootCertificateWithCustomValidationCallback", resourceCulture); } } - internal static string CannotSetBothPasswordProviderAndPassword { + /// + /// Looks up a localized string similar to SslMode.{0} cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. 
+ /// + internal static string CannotUseSslVerifyWithCustomValidationCallback { get { - return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); + return ResourceManager.GetString("CannotUseSslVerifyWithCustomValidationCallback", resourceCulture); } } - internal static string PasswordProviderMissing { + /// + /// Looks up a localized string similar to ValidationRootCertificateCallback cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseValidationRootCertificateCallbackWithCustomValidationCallback { get { - return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithCustomValidationCallback", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Cube isn't enabled; please call {0} on {1} to enable Cube.. + /// + internal static string CubeNotEnabled { + get { + return ResourceManager.GetString("CubeNotEnabled", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Type '{0}' required dynamic JSON serialization, which requires an explicit opt-in; call '{1}' on '{2}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/json.html and the 8.0 release notes for more details). Alternatively, if you meant to use Newtonsoft JSON.NET instead of System.Text.Json, call UseJsonNet() instead. + ///. + /// + internal static string DynamicJsonNotEnabled { + get { + return ResourceManager.GetString("DynamicJsonNotEnabled", resourceCulture); } } - internal static string ArgumentMustBePositive { + /// + /// Looks up a localized string similar to Full-text search isn't enabled; please call {0} on {1} to enable full-text search.. 
+ /// + internal static string FullTextSearchNotEnabled { get { - return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); } } - internal static string CannotSpecifyTargetSessionAttributes { + /// + /// Looks up a localized string similar to Integrated security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it.. + /// + internal static string IntegratedSecurityDisabled { get { - return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); + return ResourceManager.GetString("IntegratedSecurityDisabled", resourceCulture); } } - internal static string CannotReadIntervalWithMonthsAsTimeSpan { + /// + /// Looks up a localized string similar to Ltree isn't enabled; please call {0} on {1} to enable LTree.. + /// + internal static string LTreeNotEnabled { get { - return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); + return ResourceManager.GetString("LTreeNotEnabled", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Multiranges aren't enabled; please call {0} on {1} to enable multiranges.. + /// + internal static string MultirangesNotEnabled { + get { + return ResourceManager.GetString("MultirangesNotEnabled", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to No multirange type could be found in the database for subtype {0}.. + /// + internal static string NoMultirangeTypeFound { + get { + return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on batches created from DbDataSource.. 
+ /// + internal static string NotSupportedOnDataSourceBatch { + get { + return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); } } + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on commands created from DbDataSource.. + /// + internal static string NotSupportedOnDataSourceCommand { + get { + return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to The right type of password provider (sync or async) was not found.. + /// + internal static string PasswordProviderMissing { + get { + return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to When using CommandType.StoredProcedure, all positional parameters must come before named parameters.. + /// internal static string PositionalParameterAfterNamed { get { return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); } } - internal static string CannotReadInfinityValue { + /// + /// Looks up a localized string similar to Ranges aren't enabled; please call {0} on {1} to enable ranges.. + /// + internal static string RangesNotEnabled { get { - return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + return ResourceManager.GetString("RangesNotEnabled", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Could not read a PostgreSQL record. If you're attempting to read a record as a .NET tuple, call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/basic.html and the 8.0 release notes for more details). If you're reading a record as a .NET object array using NpgsqlSlimDataSourceBuilder, call '{2}'. + ///. 
+ /// + internal static string RecordsNotEnabled { + get { + return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); } } + /// + /// Looks up a localized string similar to SslClientAuthenticationOptionsCallback is not supported together with UserCertificateValidationCallback and ClientCertificatesCallback. + /// + internal static string SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported { + get { + return ResourceManager.GetString("SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Both sync and async connection initializers must be provided.. + /// internal static string SyncAndAsyncConnectionInitializersRequired { get { return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); } } + + /// + /// Looks up a localized string similar to Both sync and async password providers must be provided.. + /// + internal static string SyncAndAsyncPasswordProvidersRequired { + get { + return ResourceManager.GetString("SyncAndAsyncPasswordProvidersRequired", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Cannot write DateTime with Kind=UTC to PostgreSQL type '{0}', consider using '{1}'. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange.. + /// + internal static string TimestampNoDateTimeUtc { + get { + return ResourceManager.GetString("TimestampNoDateTimeUtc", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Cannot write DateTime with Kind={0} to PostgreSQL type '{1}', only UTC is supported. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange.. 
+ /// + internal static string TimestampTzNoDateTimeUnspecified { + get { + return ResourceManager.GetString("TimestampTzNoDateTimeUnspecified", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Transport security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it.. + /// + internal static string TransportSecurityDisabled { + get { + return ResourceManager.GetString("TransportSecurityDisabled", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Reading and writing unmapped enums requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/enums_and_composites.html and the 8.0 release notes for more details).. + /// + internal static string UnmappedEnumsNotEnabled { + get { + return ResourceManager.GetString("UnmappedEnumsNotEnabled", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Reading and writing unmapped ranges and multiranges requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/ranges.html and the 8.0 release notes for more details).. + /// + internal static string UnmappedRangesNotEnabled { + get { + return ResourceManager.GetString("UnmappedRangesNotEnabled", resourceCulture); + } + } } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 89f6d038d9..c39af4abc4 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -18,17 +18,17 @@ System.Resources.ResXResourceWriter, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - - SslMode.{0} cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. 
+ + SslMode.{0} cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. - - RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. + + RootCertificate cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. - - To validate server certificates, please use VerifyFull or VerifyCA instead of Require. To disable validation, explicitly set 'Trust Server Certificate' to true. See https://www.npgsql.org/doc/release-notes/6.0.html for more details. + + Transport security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it. - - TrustServerCertificate=true is not supported with SslMode={0} + + Integrated security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it. No multirange type could be found in the database for subtype {0}. @@ -42,15 +42,18 @@ When registering a password provider, a password or password file may not be set. + + Multiple kinds of password providers were found, only one kind may be configured per DbDataSource. + + + Both sync and async password providers must be provided. + The right type of password provider (sync or async) was not found. '{0}' must be positive. - - When creating a multi-host data source, TargetSessionAttributes cannot be specified. Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information. - Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. 
Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it. @@ -63,4 +66,48 @@ Both sync and async connection initializers must be provided. - \ No newline at end of file + + ValidationRootCertificateCallback cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. + + + Could not read a PostgreSQL record. If you're attempting to read a record as a .NET tuple, call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/basic.html and the 8.0 release notes for more details). If you're reading a record as a .NET object array using NpgsqlSlimDataSourceBuilder, call '{2}'. + + + + Full-text search isn't enabled; please call {0} on {1} to enable full-text search. + + + Ltree isn't enabled; please call {0} on {1} to enable LTree. + + + Cube isn't enabled; please call {0} on {1} to enable Cube. + + + Ranges aren't enabled; please call {0} on {1} to enable ranges. + + + Multiranges aren't enabled; please call {0} on {1} to enable multiranges. + + + Arrays aren't enabled; please call {0} on {1} to enable arrays. + + + Cannot write DateTime with Kind={0} to PostgreSQL type '{1}', only UTC is supported. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange. + + + Cannot write DateTime with Kind=UTC to PostgreSQL type '{0}', consider using '{1}'. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange. + + + Type '{0}' required dynamic JSON serialization, which requires an explicit opt-in; call '{1}' on '{2}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/json.html and the 8.0 release notes for more details). 
Alternatively, if you meant to use Newtonsoft JSON.NET instead of System.Text.Json, call UseJsonNet() instead. + + + + Reading and writing unmapped enums requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/enums_and_composites.html and the 8.0 release notes for more details). + + + Reading and writing unmapped ranges and multiranges requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/ranges.html and the 8.0 release notes for more details). + + + SslClientAuthenticationOptionsCallback is not supported together with UserCertificateValidationCallback and ClientCertificatesCallback + + diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 79818d3afd..2b80b24ca4 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1,258 +1,4 @@ -#nullable enable -abstract Npgsql.Logging.NpgsqlLogger.IsEnabled(Npgsql.Logging.NpgsqlLogLevel level) -> bool -abstract Npgsql.Logging.NpgsqlLogger.Log(Npgsql.Logging.NpgsqlLogLevel level, int connectorId, string! msg, System.Exception? exception = null) -> void -abstract Npgsql.Replication.PgOutput.Messages.UpdateMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! -abstract NpgsqlTypes.NpgsqlTsQuery.Equals(NpgsqlTypes.NpgsqlTsQuery? other) -> bool -const Npgsql.NpgsqlConnection.DefaultPort = 5432 -> int -const Npgsql.PostgresErrorCodes.ActiveSqlTransaction = "25001" -> string! -const Npgsql.PostgresErrorCodes.AdminShutdown = "57P01" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousAlias = "42P09" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousColumn = "42702" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousFunction = "42725" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousParameter = "42P08" -> string! -const Npgsql.PostgresErrorCodes.ArraySubscriptError = "2202E" -> string! 
-const Npgsql.PostgresErrorCodes.AssertFailure = "P0004" -> string! -const Npgsql.PostgresErrorCodes.BadCopyFileFormat = "22P04" -> string! -const Npgsql.PostgresErrorCodes.BranchTransactionAlreadyActive = "25002" -> string! -const Npgsql.PostgresErrorCodes.CannotCoerce = "42846" -> string! -const Npgsql.PostgresErrorCodes.CannotConnectNow = "57P03" -> string! -const Npgsql.PostgresErrorCodes.CantChangeRuntimeParam = "55P02" -> string! -const Npgsql.PostgresErrorCodes.CardinalityViolation = "21000" -> string! -const Npgsql.PostgresErrorCodes.CaseNotFound = "20000" -> string! -const Npgsql.PostgresErrorCodes.CharacterNotInRepertoire = "22021" -> string! -const Npgsql.PostgresErrorCodes.CheckViolation = "23514" -> string! -const Npgsql.PostgresErrorCodes.CollationMismatch = "42P21" -> string! -const Npgsql.PostgresErrorCodes.ConfigFileError = "F0000" -> string! -const Npgsql.PostgresErrorCodes.ConfigurationLimitExceeded = "53400" -> string! -const Npgsql.PostgresErrorCodes.ConnectionDoesNotExist = "08003" -> string! -const Npgsql.PostgresErrorCodes.ConnectionException = "08000" -> string! -const Npgsql.PostgresErrorCodes.ConnectionFailure = "08006" -> string! -const Npgsql.PostgresErrorCodes.ContainingSqlNotPermittedExternalRoutineException = "38001" -> string! -const Npgsql.PostgresErrorCodes.CrashShutdown = "57P02" -> string! -const Npgsql.PostgresErrorCodes.DatabaseDropped = "57P04" -> string! -const Npgsql.PostgresErrorCodes.DataCorrupted = "XX001" -> string! -const Npgsql.PostgresErrorCodes.DataException = "22000" -> string! -const Npgsql.PostgresErrorCodes.DatatypeMismatch = "42804" -> string! -const Npgsql.PostgresErrorCodes.DatetimeFieldOverflow = "22008" -> string! -const Npgsql.PostgresErrorCodes.DeadlockDetected = "40P01" -> string! -const Npgsql.PostgresErrorCodes.DependentObjectsStillExist = "2BP01" -> string! -const Npgsql.PostgresErrorCodes.DependentPrivilegeDescriptorsStillExist = "2B000" -> string! 
-const Npgsql.PostgresErrorCodes.DeprecatedFeatureWarning = "01P01" -> string! -const Npgsql.PostgresErrorCodes.DiagnosticsException = "0Z000" -> string! -const Npgsql.PostgresErrorCodes.DiskFull = "53100" -> string! -const Npgsql.PostgresErrorCodes.DivisionByZero = "22012" -> string! -const Npgsql.PostgresErrorCodes.DuplicateAlias = "42712" -> string! -const Npgsql.PostgresErrorCodes.DuplicateColumn = "42701" -> string! -const Npgsql.PostgresErrorCodes.DuplicateCursor = "42P03" -> string! -const Npgsql.PostgresErrorCodes.DuplicateDatabase = "42P04" -> string! -const Npgsql.PostgresErrorCodes.DuplicateFile = "58P02" -> string! -const Npgsql.PostgresErrorCodes.DuplicateFunction = "42723" -> string! -const Npgsql.PostgresErrorCodes.DuplicateObject = "42710" -> string! -const Npgsql.PostgresErrorCodes.DuplicatePreparedStatement = "42P05" -> string! -const Npgsql.PostgresErrorCodes.DuplicateSchema = "42P06" -> string! -const Npgsql.PostgresErrorCodes.DuplicateTable = "42P07" -> string! -const Npgsql.PostgresErrorCodes.DynamicResultSetsReturnedWarning = "0100C" -> string! -const Npgsql.PostgresErrorCodes.ErrorInAssignment = "22005" -> string! -const Npgsql.PostgresErrorCodes.EscapeCharacterConflict = "2200B" -> string! -const Npgsql.PostgresErrorCodes.EventTriggerProtocolViolatedExternalRoutineInvocationException = "39P03" -> string! -const Npgsql.PostgresErrorCodes.ExclusionViolation = "23P01" -> string! -const Npgsql.PostgresErrorCodes.ExternalRoutineException = "38000" -> string! -const Npgsql.PostgresErrorCodes.ExternalRoutineInvocationException = "39000" -> string! -const Npgsql.PostgresErrorCodes.FdwColumnNameNotFound = "HV005" -> string! -const Npgsql.PostgresErrorCodes.FdwDynamicParameterValueNeeded = "HV002" -> string! -const Npgsql.PostgresErrorCodes.FdwError = "HV000" -> string! -const Npgsql.PostgresErrorCodes.FdwFunctionSequenceError = "HV010" -> string! -const Npgsql.PostgresErrorCodes.FdwInconsistentDescriptorInformation = "HV021" -> string! 
-const Npgsql.PostgresErrorCodes.FdwInvalidAttributeValue = "HV024" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidColumnName = "HV007" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidColumnNumber = "HV008" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidDataType = "HV004" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidDataTypeDescriptors = "HV006" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidDescriptorFieldIdentifier = "HV091" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidHandle = "HV00B" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidOptionIndex = "HV00C" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidOptionName = "HV00D" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidStringFormat = "HV00A" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidStringLengthOrBufferLength = "HV090" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidUseOfNullPointer = "HV009" -> string! -const Npgsql.PostgresErrorCodes.FdwNoSchemas = "HV00P" -> string! -const Npgsql.PostgresErrorCodes.FdwOptionNameNotFound = "HV00J" -> string! -const Npgsql.PostgresErrorCodes.FdwOutOfMemory = "HV001" -> string! -const Npgsql.PostgresErrorCodes.FdwReplyHandle = "HV00K" -> string! -const Npgsql.PostgresErrorCodes.FdwSchemaNotFound = "HV00Q" -> string! -const Npgsql.PostgresErrorCodes.FdwTableNotFound = "HV00R" -> string! -const Npgsql.PostgresErrorCodes.FdwTooManyHandles = "HV014" -> string! -const Npgsql.PostgresErrorCodes.FdwUnableToCreateExecution = "HV00L" -> string! -const Npgsql.PostgresErrorCodes.FdwUnableToCreateReply = "HV00M" -> string! -const Npgsql.PostgresErrorCodes.FdwUnableToEstablishConnection = "HV00N" -> string! -const Npgsql.PostgresErrorCodes.FeatureNotSupported = "0A000" -> string! -const Npgsql.PostgresErrorCodes.FloatingPointException = "22P01" -> string! -const Npgsql.PostgresErrorCodes.ForeignKeyViolation = "23503" -> string! 
-const Npgsql.PostgresErrorCodes.FunctionExecutedNoReturnStatementSqlRoutineException = "2F005" -> string! -const Npgsql.PostgresErrorCodes.GroupingError = "42803" -> string! -const Npgsql.PostgresErrorCodes.HeldCursorRequiresSameIsolationLevel = "25008" -> string! -const Npgsql.PostgresErrorCodes.ImplicitZeroBitPaddingWarning = "01008" -> string! -const Npgsql.PostgresErrorCodes.InappropriateAccessModeForBranchTransaction = "25003" -> string! -const Npgsql.PostgresErrorCodes.InappropriateIsolationLevelForBranchTransaction = "25004" -> string! -const Npgsql.PostgresErrorCodes.IndeterminateCollation = "42P22" -> string! -const Npgsql.PostgresErrorCodes.IndeterminateDatatype = "42P18" -> string! -const Npgsql.PostgresErrorCodes.IndexCorrupted = "XX002" -> string! -const Npgsql.PostgresErrorCodes.IndicatorOverflow = "22022" -> string! -const Npgsql.PostgresErrorCodes.InFailedSqlTransaction = "25P02" -> string! -const Npgsql.PostgresErrorCodes.InsufficientPrivilege = "42501" -> string! -const Npgsql.PostgresErrorCodes.InsufficientResources = "53000" -> string! -const Npgsql.PostgresErrorCodes.IntegrityConstraintViolation = "23000" -> string! -const Npgsql.PostgresErrorCodes.InternalError = "XX000" -> string! -const Npgsql.PostgresErrorCodes.IntervalFieldOverflow = "22015" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForLogarithm = "2201E" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForNthValueFunction = "22016" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForNtileFunction = "22014" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForPowerFunction = "2201F" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForWidthBucketFunction = "2201G" -> string! -const Npgsql.PostgresErrorCodes.InvalidAuthorizationSpecification = "28000" -> string! -const Npgsql.PostgresErrorCodes.InvalidBinaryRepresentation = "22P03" -> string! -const Npgsql.PostgresErrorCodes.InvalidCatalogName = "3D000" -> string! 
-const Npgsql.PostgresErrorCodes.InvalidCharacterValueForCast = "22018" -> string! -const Npgsql.PostgresErrorCodes.InvalidColumnDefinition = "42611" -> string! -const Npgsql.PostgresErrorCodes.InvalidColumnReference = "42P10" -> string! -const Npgsql.PostgresErrorCodes.InvalidCursorDefinition = "42P11" -> string! -const Npgsql.PostgresErrorCodes.InvalidCursorName = "34000" -> string! -const Npgsql.PostgresErrorCodes.InvalidCursorState = "24000" -> string! -const Npgsql.PostgresErrorCodes.InvalidDatabaseDefinition = "42P12" -> string! -const Npgsql.PostgresErrorCodes.InvalidDatetimeFormat = "22007" -> string! -const Npgsql.PostgresErrorCodes.InvalidEscapeCharacter = "22019" -> string! -const Npgsql.PostgresErrorCodes.InvalidEscapeOctet = "2200D" -> string! -const Npgsql.PostgresErrorCodes.InvalidEscapeSequence = "22025" -> string! -const Npgsql.PostgresErrorCodes.InvalidForeignKey = "42830" -> string! -const Npgsql.PostgresErrorCodes.InvalidFunctionDefinition = "42P13" -> string! -const Npgsql.PostgresErrorCodes.InvalidGrantOperation = "0LP01" -> string! -const Npgsql.PostgresErrorCodes.InvalidGrantor = "0L000" -> string! -const Npgsql.PostgresErrorCodes.InvalidIndicatorParameterValue = "22010" -> string! -const Npgsql.PostgresErrorCodes.InvalidLocatorSpecification = "0F001" -> string! -const Npgsql.PostgresErrorCodes.InvalidName = "42602" -> string! -const Npgsql.PostgresErrorCodes.InvalidObjectDefinition = "42P17" -> string! -const Npgsql.PostgresErrorCodes.InvalidParameterValue = "22023" -> string! -const Npgsql.PostgresErrorCodes.InvalidPassword = "28P01" -> string! -const Npgsql.PostgresErrorCodes.InvalidPreparedStatementDefinition = "42P14" -> string! -const Npgsql.PostgresErrorCodes.InvalidRecursion = "42P19" -> string! -const Npgsql.PostgresErrorCodes.InvalidRegularExpression = "2201B" -> string! -const Npgsql.PostgresErrorCodes.InvalidRoleSpecification = "0P000" -> string! -const Npgsql.PostgresErrorCodes.InvalidRowCountInLimitClause = "2201W" -> string! 
-const Npgsql.PostgresErrorCodes.InvalidRowCountInResultOffsetClause = "2201X" -> string! -const Npgsql.PostgresErrorCodes.InvalidSavepointSpecification = "3B001" -> string! -const Npgsql.PostgresErrorCodes.InvalidSchemaDefinition = "42P15" -> string! -const Npgsql.PostgresErrorCodes.InvalidSchemaName = "3F000" -> string! -const Npgsql.PostgresErrorCodes.InvalidSqlStatementName = "26000" -> string! -const Npgsql.PostgresErrorCodes.InvalidSqlstateReturnedExternalRoutineInvocationException = "39001" -> string! -const Npgsql.PostgresErrorCodes.InvalidTableDefinition = "42P16" -> string! -const Npgsql.PostgresErrorCodes.InvalidTablesampleArgument = "2202H" -> string! -const Npgsql.PostgresErrorCodes.InvalidTablesampleRepeat = "2202G" -> string! -const Npgsql.PostgresErrorCodes.InvalidTextRepresentation = "22P02" -> string! -const Npgsql.PostgresErrorCodes.InvalidTimeZoneDisplacementValue = "22009" -> string! -const Npgsql.PostgresErrorCodes.InvalidTransactionInitiation = "0B000" -> string! -const Npgsql.PostgresErrorCodes.InvalidTransactionState = "25000" -> string! -const Npgsql.PostgresErrorCodes.InvalidTransactionTermination = "2D000" -> string! -const Npgsql.PostgresErrorCodes.InvalidUseOfEscapeCharacter = "2200C" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlComment = "2200S" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlContent = "2200N" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlDocument = "2200M" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlProcessingInstruction = "2200T" -> string! -const Npgsql.PostgresErrorCodes.IoError = "58030" -> string! -const Npgsql.PostgresErrorCodes.LocatorException = "0F000" -> string! -const Npgsql.PostgresErrorCodes.LockFileExists = "F0001" -> string! -const Npgsql.PostgresErrorCodes.LockNotAvailable = "55P03" -> string! -const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedExternalRoutineException = "38002" -> string! 
-const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedSqlRoutineException = "2F002" -> string! -const Npgsql.PostgresErrorCodes.MostSpecificTypeMismatch = "2200G" -> string! -const Npgsql.PostgresErrorCodes.NameTooLong = "42622" -> string! -const Npgsql.PostgresErrorCodes.NoActiveSqlTransaction = "25P01" -> string! -const Npgsql.PostgresErrorCodes.NoActiveSqlTransactionForBranchTransaction = "25005" -> string! -const Npgsql.PostgresErrorCodes.NoAdditionalDynamicResultSetsReturned = "02001" -> string! -const Npgsql.PostgresErrorCodes.NoData = "02000" -> string! -const Npgsql.PostgresErrorCodes.NoDataFound = "P0002" -> string! -const Npgsql.PostgresErrorCodes.NonstandardUseOfEscapeCharacter = "22P06" -> string! -const Npgsql.PostgresErrorCodes.NotAnXmlDocument = "2200L" -> string! -const Npgsql.PostgresErrorCodes.NotNullViolation = "23502" -> string! -const Npgsql.PostgresErrorCodes.NullValueEliminatedInSetFunctionWarning = "01003" -> string! -const Npgsql.PostgresErrorCodes.NullValueNoIndicatorParameter = "22002" -> string! -const Npgsql.PostgresErrorCodes.NullValueNotAllowed = "22004" -> string! -const Npgsql.PostgresErrorCodes.NullValueNotAllowedExternalRoutineInvocationException = "39004" -> string! -const Npgsql.PostgresErrorCodes.NumericValueOutOfRange = "22003" -> string! -const Npgsql.PostgresErrorCodes.ObjectInUse = "55006" -> string! -const Npgsql.PostgresErrorCodes.ObjectNotInPrerequisiteState = "55000" -> string! -const Npgsql.PostgresErrorCodes.OperatorIntervention = "57000" -> string! -const Npgsql.PostgresErrorCodes.OutOfMemory = "53200" -> string! -const Npgsql.PostgresErrorCodes.PlpgsqlError = "P0000" -> string! -const Npgsql.PostgresErrorCodes.PrivilegeNotGrantedWarning = "01007" -> string! -const Npgsql.PostgresErrorCodes.PrivilegeNotRevokedWarning = "01006" -> string! -const Npgsql.PostgresErrorCodes.ProgramLimitExceeded = "54000" -> string! 
-const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedExternalRoutineException = "38003" -> string! -const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedSqlRoutineException = "2F003" -> string! -const Npgsql.PostgresErrorCodes.ProtocolViolation = "08P01" -> string! -const Npgsql.PostgresErrorCodes.QueryCanceled = "57014" -> string! -const Npgsql.PostgresErrorCodes.RaiseException = "P0001" -> string! -const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedExternalRoutineException = "38004" -> string! -const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedSqlRoutineException = "2F004" -> string! -const Npgsql.PostgresErrorCodes.ReadOnlySqlTransaction = "25006" -> string! -const Npgsql.PostgresErrorCodes.ReservedName = "42939" -> string! -const Npgsql.PostgresErrorCodes.RestrictViolation = "23001" -> string! -const Npgsql.PostgresErrorCodes.SavepointException = "3B000" -> string! -const Npgsql.PostgresErrorCodes.SchemaAndDataStatementMixingNotSupported = "25007" -> string! -const Npgsql.PostgresErrorCodes.SerializationFailure = "40001" -> string! -const Npgsql.PostgresErrorCodes.SnapshotFailure = "72000" -> string! -const Npgsql.PostgresErrorCodes.SqlClientUnableToEstablishSqlConnection = "08001" -> string! -const Npgsql.PostgresErrorCodes.SqlRoutineException = "2F000" -> string! -const Npgsql.PostgresErrorCodes.SqlServerRejectedEstablishmentOfSqlConnection = "08004" -> string! -const Npgsql.PostgresErrorCodes.SqlStatementNotYetComplete = "03000" -> string! -const Npgsql.PostgresErrorCodes.SrfProtocolViolatedExternalRoutineInvocationException = "39P02" -> string! -const Npgsql.PostgresErrorCodes.StackedDiagnosticsAccessedWithoutActiveHandler = "0Z002" -> string! -const Npgsql.PostgresErrorCodes.StatementCompletionUnknown = "40003" -> string! -const Npgsql.PostgresErrorCodes.StatementTooComplex = "54001" -> string! -const Npgsql.PostgresErrorCodes.StringDataLengthMismatch = "22026" -> string! 
-const Npgsql.PostgresErrorCodes.StringDataRightTruncation = "22001" -> string! -const Npgsql.PostgresErrorCodes.StringDataRightTruncationWarning = "01004" -> string! -const Npgsql.PostgresErrorCodes.SubstringError = "22011" -> string! -const Npgsql.PostgresErrorCodes.SuccessfulCompletion = "00000" -> string! -const Npgsql.PostgresErrorCodes.SyntaxError = "42601" -> string! -const Npgsql.PostgresErrorCodes.SyntaxErrorOrAccessRuleViolation = "42000" -> string! -const Npgsql.PostgresErrorCodes.SystemError = "58000" -> string! -const Npgsql.PostgresErrorCodes.TooManyArguments = "54023" -> string! -const Npgsql.PostgresErrorCodes.TooManyColumns = "54011" -> string! -const Npgsql.PostgresErrorCodes.TooManyConnections = "53300" -> string! -const Npgsql.PostgresErrorCodes.TooManyRows = "P0003" -> string! -const Npgsql.PostgresErrorCodes.TransactionIntegrityConstraintViolation = "40002" -> string! -const Npgsql.PostgresErrorCodes.TransactionResolutionUnknown = "08007" -> string! -const Npgsql.PostgresErrorCodes.TransactionRollback = "40000" -> string! -const Npgsql.PostgresErrorCodes.TriggeredActionException = "09000" -> string! -const Npgsql.PostgresErrorCodes.TriggeredDataChangeViolation = "27000" -> string! -const Npgsql.PostgresErrorCodes.TriggerProtocolViolatedExternalRoutineInvocationException = "39P01" -> string! -const Npgsql.PostgresErrorCodes.TrimError = "22027" -> string! -const Npgsql.PostgresErrorCodes.UndefinedColumn = "42703" -> string! -const Npgsql.PostgresErrorCodes.UndefinedFile = "58P01" -> string! -const Npgsql.PostgresErrorCodes.UndefinedFunction = "42883" -> string! -const Npgsql.PostgresErrorCodes.UndefinedObject = "42704" -> string! -const Npgsql.PostgresErrorCodes.UndefinedParameter = "42P02" -> string! -const Npgsql.PostgresErrorCodes.UndefinedTable = "42P01" -> string! -const Npgsql.PostgresErrorCodes.UniqueViolation = "23505" -> string! -const Npgsql.PostgresErrorCodes.UnterminatedCString = "22024" -> string! 
-const Npgsql.PostgresErrorCodes.UntranslatableCharacter = "22P05" -> string! -const Npgsql.PostgresErrorCodes.Warning = "01000" -> string! -const Npgsql.PostgresErrorCodes.WindowingError = "42P20" -> string! -const Npgsql.PostgresErrorCodes.WithCheckOptionViolation = "44000" -> string! -const Npgsql.PostgresErrorCodes.WrongObjectType = "42809" -> string! -const Npgsql.PostgresErrorCodes.ZeroLengthCharacterString = "2200F" -> string! -const NpgsqlTypes.NpgsqlDate.MaxYear = 5874897 -> int -const NpgsqlTypes.NpgsqlDate.MinYear = -4714 -> int -const NpgsqlTypes.NpgsqlTimeSpan.DaysPerMonth = 30 -> int -const NpgsqlTypes.NpgsqlTimeSpan.HoursPerDay = 24 -> int -const NpgsqlTypes.NpgsqlTimeSpan.MonthsPerYear = 12 -> int -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerDay = 864000000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerHour = 36000000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMicrosecond = 10 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMillsecond = 10000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMinute = 600000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMonth = 25920000000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerSecond = 10000000 -> long +#nullable enable Npgsql.ArrayNullabilityMode Npgsql.ArrayNullabilityMode.Always = 1 -> Npgsql.ArrayNullabilityMode Npgsql.ArrayNullabilityMode.Never = 0 -> Npgsql.ArrayNullabilityMode @@ -262,31 +8,24 @@ Npgsql.BackendMessages.FieldDescription.TypeModifier.get -> int Npgsql.BackendMessages.FieldDescription.TypeModifier.set -> void Npgsql.BackendMessages.FieldDescription.TypeSize.get -> short Npgsql.BackendMessages.FieldDescription.TypeSize.set -> void +Npgsql.ChannelBinding +Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding +Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding +Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding +Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Prefer = 1 -> 
Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Require = 2 -> Npgsql.GssEncryptionMode Npgsql.INpgsqlNameTranslator Npgsql.INpgsqlNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.INpgsqlNameTranslator.TranslateTypeName(string! clrName) -> string! -Npgsql.Logging.ConsoleLoggingProvider -Npgsql.Logging.ConsoleLoggingProvider.ConsoleLoggingProvider(Npgsql.Logging.NpgsqlLogLevel minLevel = Npgsql.Logging.NpgsqlLogLevel.Info, bool printLevel = false, bool printConnectorId = false) -> void -Npgsql.Logging.ConsoleLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -Npgsql.Logging.INpgsqlLoggingProvider -Npgsql.Logging.INpgsqlLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -Npgsql.Logging.NpgsqlLogger -Npgsql.Logging.NpgsqlLogger.NpgsqlLogger() -> void -Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Debug = 2 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Error = 5 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Fatal = 6 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Info = 3 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Trace = 1 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Warn = 4 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogManager Npgsql.NameTranslation.NpgsqlNullNameTranslator Npgsql.NameTranslation.NpgsqlNullNameTranslator.NpgsqlNullNameTranslator() -> void Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateTypeName(string! clrName) -> string! 
Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator -Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator() -> void -Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode) -> void +Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(System.Globalization.CultureInfo? culture = null) -> void +Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode, System.Globalization.CultureInfo? culture = null) -> void Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateTypeName(string! clrName) -> string! Npgsql.NoticeEventHandler @@ -295,6 +34,9 @@ Npgsql.NpgsqlBatch Npgsql.NpgsqlBatch.BatchCommands.get -> Npgsql.NpgsqlBatchCommandCollection! Npgsql.NpgsqlBatch.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.NpgsqlBatch.Connection.set -> void +Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! +Npgsql.NpgsqlBatch.EnableErrorBarriers.get -> bool +Npgsql.NpgsqlBatch.EnableErrorBarriers.set -> void Npgsql.NpgsqlBatch.ExecuteReader(System.Data.CommandBehavior behavior = System.Data.CommandBehavior.Default) -> Npgsql.NpgsqlDataReader! Npgsql.NpgsqlBatch.ExecuteReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBatch.ExecuteReaderAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! @@ -302,6 +44,8 @@ Npgsql.NpgsqlBatch.NpgsqlBatch(Npgsql.NpgsqlConnection? connection = null, Npgsq Npgsql.NpgsqlBatch.Transaction.get -> Npgsql.NpgsqlTransaction? Npgsql.NpgsqlBatch.Transaction.set -> void Npgsql.NpgsqlBatchCommand +Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.get -> bool? 
+Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.set -> void Npgsql.NpgsqlBatchCommand.NpgsqlBatchCommand() -> void Npgsql.NpgsqlBatchCommand.NpgsqlBatchCommand(string! commandText) -> void Npgsql.NpgsqlBatchCommand.OID.get -> uint @@ -346,16 +90,15 @@ Npgsql.NpgsqlBinaryImporter.Write(T value) -> void Npgsql.NpgsqlBinaryImporter.Write(T value, NpgsqlTypes.NpgsqlDbType npgsqlDbType) -> void Npgsql.NpgsqlBinaryImporter.Write(T value, string! dataTypeName) -> void Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, NpgsqlTypes.NpgsqlDbType npgsqlDbType, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, string! dataTypeName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, string! dataTypeName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBinaryImporter.WriteNull() -> void Npgsql.NpgsqlBinaryImporter.WriteNullAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void -Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void +Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! 
values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlCommand Npgsql.NpgsqlCommand.AllResultTypesAreUnknown.get -> bool Npgsql.NpgsqlCommand.AllResultTypesAreUnknown.set -> void -Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! Npgsql.NpgsqlCommand.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.NpgsqlCommand.Connection.set -> void Npgsql.NpgsqlCommand.CreateParameter() -> Npgsql.NpgsqlParameter! @@ -392,15 +135,16 @@ Npgsql.NpgsqlConnection.BeginBinaryImport(string! copyFromCommand) -> Npgsql.Npg Npgsql.NpgsqlConnection.BeginBinaryImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.BeginRawBinaryCopy(string! copyCommand) -> Npgsql.NpgsqlRawCopyStream! Npgsql.NpgsqlConnection.BeginRawBinaryCopyAsync(string! copyCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> System.IO.TextReader! -Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> System.IO.TextWriter! -Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> Npgsql.NpgsqlCopyTextReader! +Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> Npgsql.NpgsqlCopyTextWriter! 
+Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.BeginTransaction() -> Npgsql.NpgsqlTransaction! Npgsql.NpgsqlConnection.BeginTransaction(System.Data.IsolationLevel level) -> Npgsql.NpgsqlTransaction! Npgsql.NpgsqlConnection.BeginTransactionAsync(System.Data.IsolationLevel level, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.BeginTransactionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.CloneWith(string! connectionString) -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlConnection.CloneWithAsync(string! connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.CommandTimeout.get -> int Npgsql.NpgsqlConnection.CreateBatch() -> Npgsql.NpgsqlBatch! Npgsql.NpgsqlConnection.CreateCommand() -> Npgsql.NpgsqlCommand! @@ -408,27 +152,23 @@ Npgsql.NpgsqlConnection.Disposed -> System.EventHandler? Npgsql.NpgsqlConnection.FullState.get -> System.Data.ConnectionState Npgsql.NpgsqlConnection.HasIntegerDateTimes.get -> bool Npgsql.NpgsqlConnection.Host.get -> string? -Npgsql.NpgsqlConnection.IntegratedSecurity.get -> bool -Npgsql.NpgsqlConnection.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -Npgsql.NpgsqlConnection.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void Npgsql.NpgsqlConnection.Notice -> Npgsql.NoticeEventHandler? Npgsql.NpgsqlConnection.Notification -> Npgsql.NotificationEventHandler? 
Npgsql.NpgsqlConnection.NpgsqlConnection() -> void Npgsql.NpgsqlConnection.NpgsqlConnection(string? connectionString) -> void -Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.get -> Npgsql.PhysicalOpenAsyncCallback? -Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.set -> void -Npgsql.NpgsqlConnection.PhysicalOpenCallback.get -> Npgsql.PhysicalOpenCallback? -Npgsql.NpgsqlConnection.PhysicalOpenCallback.set -> void Npgsql.NpgsqlConnection.Port.get -> int -Npgsql.NpgsqlConnection.PostgresParameters.get -> System.Collections.Generic.IReadOnlyDictionary! Npgsql.NpgsqlConnection.PostgreSqlVersion.get -> System.Version! +Npgsql.NpgsqlConnection.PostgresParameters.get -> System.Collections.Generic.IReadOnlyDictionary! Npgsql.NpgsqlConnection.ProcessID.get -> int Npgsql.NpgsqlConnection.ProvideClientCertificatesCallback.get -> Npgsql.ProvideClientCertificatesCallback? Npgsql.NpgsqlConnection.ProvideClientCertificatesCallback.set -> void Npgsql.NpgsqlConnection.ProvidePasswordCallback.get -> Npgsql.ProvidePasswordCallback? Npgsql.NpgsqlConnection.ProvidePasswordCallback.set -> void Npgsql.NpgsqlConnection.ReloadTypes() -> void -Npgsql.NpgsqlConnection.Settings.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? +Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlConnection.Timezone.get -> string! Npgsql.NpgsqlConnection.TypeMapper.get -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlConnection.UnprepareAll() -> void @@ -436,11 +176,11 @@ Npgsql.NpgsqlConnection.UserCertificateValidationCallback.get -> System.Net.Secu Npgsql.NpgsqlConnection.UserCertificateValidationCallback.set -> void Npgsql.NpgsqlConnection.UserName.get -> string? 
Npgsql.NpgsqlConnection.Wait() -> void -Npgsql.NpgsqlConnection.Wait(int timeout) -> bool Npgsql.NpgsqlConnection.Wait(System.TimeSpan timeout) -> bool -Npgsql.NpgsqlConnection.WaitAsync(int timeout, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.Wait(int timeout) -> bool Npgsql.NpgsqlConnection.WaitAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.WaitAsync(System.TimeSpan timeout, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.WaitAsync(int timeout, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnectionStringBuilder Npgsql.NpgsqlConnectionStringBuilder.Add(System.Collections.Generic.KeyValuePair item) -> void Npgsql.NpgsqlConnectionStringBuilder.ApplicationName.get -> string? 
@@ -449,16 +189,12 @@ Npgsql.NpgsqlConnectionStringBuilder.ArrayNullabilityMode.get -> Npgsql.ArrayNul Npgsql.NpgsqlConnectionStringBuilder.ArrayNullabilityMode.set -> void Npgsql.NpgsqlConnectionStringBuilder.AutoPrepareMinUsages.get -> int Npgsql.NpgsqlConnectionStringBuilder.AutoPrepareMinUsages.set -> void -Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.set -> void Npgsql.NpgsqlConnectionStringBuilder.CancellationTimeout.get -> int Npgsql.NpgsqlConnectionStringBuilder.CancellationTimeout.set -> void +Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding +Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void Npgsql.NpgsqlConnectionStringBuilder.CheckCertificateRevocation.get -> bool Npgsql.NpgsqlConnectionStringBuilder.CheckCertificateRevocation.set -> void -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.set -> void -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.set -> void Npgsql.NpgsqlConnectionStringBuilder.ClientEncoding.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.ClientEncoding.set -> void Npgsql.NpgsqlConnectionStringBuilder.CommandTimeout.get -> int @@ -470,10 +206,6 @@ Npgsql.NpgsqlConnectionStringBuilder.ConnectionLifetime.set -> void Npgsql.NpgsqlConnectionStringBuilder.ConnectionPruningInterval.get -> int Npgsql.NpgsqlConnectionStringBuilder.ConnectionPruningInterval.set -> void Npgsql.NpgsqlConnectionStringBuilder.Contains(System.Collections.Generic.KeyValuePair item) -> bool -Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.set -> void -Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.set -> void Npgsql.NpgsqlConnectionStringBuilder.CopyTo(System.Collections.Generic.KeyValuePair[]! array, int arrayIndex) -> void Npgsql.NpgsqlConnectionStringBuilder.Database.get -> string? Npgsql.NpgsqlConnectionStringBuilder.Database.set -> void @@ -481,23 +213,19 @@ Npgsql.NpgsqlConnectionStringBuilder.Encoding.get -> string! Npgsql.NpgsqlConnectionStringBuilder.Encoding.set -> void Npgsql.NpgsqlConnectionStringBuilder.Enlist.get -> bool Npgsql.NpgsqlConnectionStringBuilder.Enlist.set -> void -Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.set -> void -Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.set -> void Npgsql.NpgsqlConnectionStringBuilder.GetEnumerator() -> System.Collections.Generic.IEnumerator>! +Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.get -> Npgsql.GssEncryptionMode +Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.set -> void Npgsql.NpgsqlConnectionStringBuilder.Host.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.Host.set -> void Npgsql.NpgsqlConnectionStringBuilder.HostRecheckSeconds.get -> int Npgsql.NpgsqlConnectionStringBuilder.HostRecheckSeconds.set -> void Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetail.get -> bool Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetail.set -> void -Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.set -> void +Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.get -> bool +Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.set -> void Npgsql.NpgsqlConnectionStringBuilder.IncludeRealm.get -> bool Npgsql.NpgsqlConnectionStringBuilder.IncludeRealm.set -> void -Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.set -> void Npgsql.NpgsqlConnectionStringBuilder.InternalCommandTimeout.get -> int Npgsql.NpgsqlConnectionStringBuilder.InternalCommandTimeout.set -> void Npgsql.NpgsqlConnectionStringBuilder.KeepAlive.get -> int @@ -536,11 +264,11 @@ Npgsql.NpgsqlConnectionStringBuilder.Pooling.get -> bool Npgsql.NpgsqlConnectionStringBuilder.Pooling.set -> void Npgsql.NpgsqlConnectionStringBuilder.Port.get -> int Npgsql.NpgsqlConnectionStringBuilder.Port.set -> void -Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.set -> void Npgsql.NpgsqlConnectionStringBuilder.ReadBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.ReadBufferSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.Remove(System.Collections.Generic.KeyValuePair item) -> bool +Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? +Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void Npgsql.NpgsqlConnectionStringBuilder.RootCertificate.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.RootCertificate.set -> void Npgsql.NpgsqlConnectionStringBuilder.SearchPath.get -> string? @@ -557,6 +285,8 @@ Npgsql.NpgsqlConnectionStringBuilder.SslKey.get -> string? Npgsql.NpgsqlConnectionStringBuilder.SslKey.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslMode.get -> Npgsql.SslMode Npgsql.NpgsqlConnectionStringBuilder.SslMode.set -> void +Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation +Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslPassword.get -> string? Npgsql.NpgsqlConnectionStringBuilder.SslPassword.set -> void Npgsql.NpgsqlConnectionStringBuilder.TargetSessionAttributes.get -> string? @@ -573,30 +303,24 @@ Npgsql.NpgsqlConnectionStringBuilder.Timezone.get -> string? Npgsql.NpgsqlConnectionStringBuilder.Timezone.set -> void Npgsql.NpgsqlConnectionStringBuilder.TrustServerCertificate.get -> bool Npgsql.NpgsqlConnectionStringBuilder.TrustServerCertificate.set -> void -Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.set -> void -Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.set -> void Npgsql.NpgsqlConnectionStringBuilder.Username.get -> string? Npgsql.NpgsqlConnectionStringBuilder.Username.set -> void -Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.set -> void Npgsql.NpgsqlConnectionStringBuilder.Values.get -> System.Collections.Generic.ICollection! 
Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void -Npgsql.NpgsqlConnectionStringPropertyAttribute -Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute() -> void -Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute(params string![]! synonyms) -> void -Npgsql.NpgsqlConnectionStringPropertyAttribute.Synonyms.get -> string![]! Npgsql.NpgsqlCopyTextReader Npgsql.NpgsqlCopyTextReader.Cancel() -> void Npgsql.NpgsqlCopyTextReader.CancelAsync() -> System.Threading.Tasks.Task! Npgsql.NpgsqlCopyTextReader.DisposeAsync() -> System.Threading.Tasks.ValueTask +Npgsql.NpgsqlCopyTextReader.Timeout.get -> int +Npgsql.NpgsqlCopyTextReader.Timeout.set -> void Npgsql.NpgsqlCopyTextWriter Npgsql.NpgsqlCopyTextWriter.Cancel() -> void Npgsql.NpgsqlCopyTextWriter.CancelAsync() -> System.Threading.Tasks.Task! +Npgsql.NpgsqlCopyTextWriter.Timeout.get -> int +Npgsql.NpgsqlCopyTextWriter.Timeout.set -> void Npgsql.NpgsqlDataAdapter Npgsql.NpgsqlDataAdapter.DeleteCommand.get -> Npgsql.NpgsqlCommand? Npgsql.NpgsqlDataAdapter.DeleteCommand.set -> void @@ -614,27 +338,72 @@ Npgsql.NpgsqlDataAdapter.UpdateCommand.get -> Npgsql.NpgsqlCommand? Npgsql.NpgsqlDataAdapter.UpdateCommand.set -> void Npgsql.NpgsqlDataReader Npgsql.NpgsqlDataReader.GetColumnSchema() -> System.Collections.ObjectModel.ReadOnlyCollection! -Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! Npgsql.NpgsqlDataReader.GetData(int ordinal) -> Npgsql.NpgsqlNestedDataReader! 
Npgsql.NpgsqlDataReader.GetDataTypeOID(int ordinal) -> uint -Npgsql.NpgsqlDataReader.GetDate(int ordinal) -> NpgsqlTypes.NpgsqlDate -Npgsql.NpgsqlDataReader.GetInterval(int ordinal) -> NpgsqlTypes.NpgsqlTimeSpan Npgsql.NpgsqlDataReader.GetPostgresType(int ordinal) -> Npgsql.PostgresTypes.PostgresType! Npgsql.NpgsqlDataReader.GetStreamAsync(int ordinal, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataReader.GetTextReaderAsync(int ordinal, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataReader.GetTimeSpan(int ordinal) -> System.TimeSpan -Npgsql.NpgsqlDataReader.GetTimeStamp(int ordinal) -> NpgsqlTypes.NpgsqlDateTime Npgsql.NpgsqlDataReader.IsOnRow.get -> bool Npgsql.NpgsqlDataReader.ReaderClosed -> System.EventHandler? Npgsql.NpgsqlDataReader.Rows.get -> ulong Npgsql.NpgsqlDataReader.Statements.get -> System.Collections.Generic.IReadOnlyList! +Npgsql.NpgsqlDataSource +Npgsql.NpgsqlDataSource.CreateBatch() -> Npgsql.NpgsqlBatch! +Npgsql.NpgsqlDataSource.CreateCommand(string? commandText = null) -> Npgsql.NpgsqlCommand! +Npgsql.NpgsqlDataSource.CreateConnection() -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlDataSource.OpenConnection() -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlDataSource.OpenConnectionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask +Npgsql.NpgsqlDataSource.Password.set -> void +Npgsql.NpgsqlDataSource.ReloadTypes() -> void +Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlDataSourceBuilder +Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! 
factory) -> void +Npgsql.NpgsqlDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! +Npgsql.NpgsqlDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.ConnectionString.get -> string! +Npgsql.NpgsqlDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? +Npgsql.NpgsqlDataSourceBuilder.Name.set -> void +Npgsql.NpgsqlDataSourceBuilder.NpgsqlDataSourceBuilder(string? connectionString = null) -> void +Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlDataSourceBuilder! 
+Npgsql.NpgsqlDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlException Npgsql.NpgsqlException.BatchCommand.get -> Npgsql.NpgsqlBatchCommand? Npgsql.NpgsqlException.BatchCommand.set -> void Npgsql.NpgsqlException.NpgsqlException() -> void +Npgsql.NpgsqlException.NpgsqlException(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void Npgsql.NpgsqlException.NpgsqlException(string? message) -> void Npgsql.NpgsqlException.NpgsqlException(string? message, System.Exception? innerException) -> void -Npgsql.NpgsqlException.NpgsqlException(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void Npgsql.NpgsqlFactory Npgsql.NpgsqlFactory.GetService(System.Type! serviceType) -> object? 
Npgsql.NpgsqlLargeObjectManager @@ -659,16 +428,23 @@ Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLoggingConfiguration +Npgsql.NpgsqlMetricsOptions +Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void +Npgsql.NpgsqlMultiHostDataSource +Npgsql.NpgsqlMultiHostDataSource.ClearDatabaseStates() -> void +Npgsql.NpgsqlMultiHostDataSource.CreateConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlMultiHostDataSource.OpenConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlMultiHostDataSource.OpenConnectionAsync(Npgsql.TargetSessionAttributes targetSessionAttributes, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask +Npgsql.NpgsqlMultiHostDataSource.WithTargetSession(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlNestedDataReader Npgsql.NpgsqlNestedDataReader.GetData(int ordinal) -> Npgsql.NpgsqlNestedDataReader! Npgsql.NpgsqlNoticeEventArgs Npgsql.NpgsqlNoticeEventArgs.Notice.get -> Npgsql.PostgresNotice! Npgsql.NpgsqlNotificationEventArgs -Npgsql.NpgsqlNotificationEventArgs.AdditionalInformation.get -> string! Npgsql.NpgsqlNotificationEventArgs.Channel.get -> string! -Npgsql.NpgsqlNotificationEventArgs.Condition.get -> string! -Npgsql.NpgsqlNotificationEventArgs.Payload.get -> string! 
Npgsql.NpgsqlNotificationEventArgs.PID.get -> int +Npgsql.NpgsqlNotificationEventArgs.Payload.get -> string! Npgsql.NpgsqlOperationInProgressException Npgsql.NpgsqlOperationInProgressException.CommandInProgress.get -> Npgsql.NpgsqlCommand? Npgsql.NpgsqlOperationInProgressException.NpgsqlOperationInProgressException(Npgsql.NpgsqlCommand! command) -> void @@ -676,8 +452,6 @@ Npgsql.NpgsqlParameter Npgsql.NpgsqlParameter.Clone() -> Npgsql.NpgsqlParameter! Npgsql.NpgsqlParameter.Collection.get -> Npgsql.NpgsqlParameterCollection? Npgsql.NpgsqlParameter.Collection.set -> void -Npgsql.NpgsqlParameter.ConvertedValue.get -> object? -Npgsql.NpgsqlParameter.ConvertedValue.set -> void Npgsql.NpgsqlParameter.DataTypeName.get -> string? Npgsql.NpgsqlParameter.DataTypeName.set -> void Npgsql.NpgsqlParameter.NpgsqlDbType.get -> NpgsqlTypes.NpgsqlDbType @@ -688,10 +462,10 @@ Npgsql.NpgsqlParameter.NpgsqlParameter(string! parameterName, System.Data.DbType Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, NpgsqlTypes.NpgsqlDbType parameterType) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, NpgsqlTypes.NpgsqlDbType parameterType, int size) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, NpgsqlTypes.NpgsqlDbType parameterType, int size, string? sourceColumn) -> void -Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, object? value) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, System.Data.DbType parameterType) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, System.Data.DbType parameterType, int size) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, System.Data.DbType parameterType, int size, string? sourceColumn) -> void +Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, object? value) -> void Npgsql.NpgsqlParameter.NpgsqlValue.get -> object? 
Npgsql.NpgsqlParameter.NpgsqlValue.set -> void Npgsql.NpgsqlParameter.PostgresType.get -> Npgsql.PostgresTypes.PostgresType? @@ -723,12 +497,12 @@ Npgsql.NpgsqlParameterCollection.IndexOf(Npgsql.NpgsqlParameter! item) -> int Npgsql.NpgsqlParameterCollection.Insert(int index, Npgsql.NpgsqlParameter! item) -> void Npgsql.NpgsqlParameterCollection.Remove(Npgsql.NpgsqlParameter! item) -> bool Npgsql.NpgsqlParameterCollection.Remove(string! parameterName) -> void +Npgsql.NpgsqlParameterCollection.ToArray() -> Npgsql.NpgsqlParameter![]! +Npgsql.NpgsqlParameterCollection.TryGetValue(string! parameterName, out Npgsql.NpgsqlParameter? parameter) -> bool Npgsql.NpgsqlParameterCollection.this[int index].get -> Npgsql.NpgsqlParameter! Npgsql.NpgsqlParameterCollection.this[int index].set -> void Npgsql.NpgsqlParameterCollection.this[string! parameterName].get -> Npgsql.NpgsqlParameter! Npgsql.NpgsqlParameterCollection.this[string! parameterName].set -> void -Npgsql.NpgsqlParameterCollection.ToArray() -> Npgsql.NpgsqlParameter![]! -Npgsql.NpgsqlParameterCollection.TryGetValue(string! parameterName, out Npgsql.NpgsqlParameter? parameter) -> bool Npgsql.NpgsqlRawCopyStream Npgsql.NpgsqlRawCopyStream.Cancel() -> void Npgsql.NpgsqlRawCopyStream.CancelAsync() -> System.Threading.Tasks.Task! @@ -738,15 +512,79 @@ Npgsql.NpgsqlRowUpdatedEventHandler Npgsql.NpgsqlRowUpdatingEventArgs Npgsql.NpgsqlRowUpdatingEventArgs.NpgsqlRowUpdatingEventArgs(System.Data.DataRow! dataRow, System.Data.IDbCommand? command, System.Data.StatementType statementType, System.Data.Common.DataTableMapping! tableMapping) -> void Npgsql.NpgsqlRowUpdatingEventHandler -Npgsql.NpgsqlTracingOptions -Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void +Npgsql.NpgsqlSlimDataSourceBuilder +Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! 
+Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! +Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableCube() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableLTree() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
+Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.Name.get -> string? +Npgsql.NpgsqlSlimDataSourceBuilder.Name.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? connectionString = null) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? 
rootCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlTracingOptionsBuilder +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchEnrichmentCallback(System.Action? batchEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchFilter(System.Func? batchFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchSpanNameProvider(System.Func? batchSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandEnrichmentCallback(System.Action? commandEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationEnrichmentCallback(System.Action? copyOperationEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationFilter(System.Func? copyOperationFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationSpanNameProvider(System.Func? copyOperationSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! 
+Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.EnablePhysicalOpenTracing(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTransaction Npgsql.NpgsqlTransaction.Connection.get -> Npgsql.NpgsqlConnection? -Npgsql.PhysicalOpenAsyncCallback -Npgsql.PhysicalOpenCallback +Npgsql.NpgsqlTypeLoadingOptionsBuilder +Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTableCompositesLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTypeLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder.SetTypeLoadingSchemas(params System.Collections.Generic.IEnumerable? schemas) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! Npgsql.PostgresErrorCodes Npgsql.PostgresException -Npgsql.PostgresException.Code.get -> string! Npgsql.PostgresException.ColumnName.get -> string? Npgsql.PostgresException.ConstraintName.get -> string? Npgsql.PostgresException.DataTypeName.get -> string? @@ -767,7 +605,6 @@ Npgsql.PostgresException.Severity.get -> string! Npgsql.PostgresException.TableName.get -> string? Npgsql.PostgresException.Where.get -> string? Npgsql.PostgresNotice -Npgsql.PostgresNotice.Code.get -> string! Npgsql.PostgresNotice.ColumnName.get -> string? Npgsql.PostgresNotice.ColumnName.set -> void Npgsql.PostgresNotice.ConstraintName.get -> string? @@ -807,9 +644,9 @@ Npgsql.PostgresNotice.Where.get -> string? Npgsql.PostgresNotice.Where.set -> void Npgsql.PostgresTypes.PostgresArrayType Npgsql.PostgresTypes.PostgresArrayType.Element.get -> Npgsql.PostgresTypes.PostgresType! -Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! internalName, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void +Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! 
name, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void Npgsql.PostgresTypes.PostgresBaseType -Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! internalName, uint oid) -> void +Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! name, uint oid) -> void Npgsql.PostgresTypes.PostgresCompositeType Npgsql.PostgresTypes.PostgresCompositeType.Field Npgsql.PostgresTypes.PostgresCompositeType.Field.Name.get -> string! @@ -837,8 +674,6 @@ Npgsql.PostgresTypes.PostgresType.InternalName.get -> string! Npgsql.PostgresTypes.PostgresType.Name.get -> string! Npgsql.PostgresTypes.PostgresType.Namespace.get -> string! Npgsql.PostgresTypes.PostgresType.OID.get -> uint -Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, string! internalName, uint oid) -> void -Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, uint oid) -> void Npgsql.PostgresTypes.PostgresType.Range.get -> Npgsql.PostgresTypes.PostgresRangeType? Npgsql.PostgresTypes.UnknownBackendType Npgsql.ProvideClientCertificatesCallback @@ -859,6 +694,7 @@ Npgsql.Replication.LogicalSlotSnapshotInitMode.Use = 1 -> Npgsql.Replication.Log Npgsql.Replication.PgOutput.Messages.BeginMessage Npgsql.Replication.PgOutput.Messages.BeginMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.BeginMessage.TransactionFinalLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.BeginPrepareMessage Npgsql.Replication.PgOutput.Messages.CommitMessage Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags @@ -866,10 +702,16 @@ Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitLsn.get -> NpgsqlTypes. 
Npgsql.Replication.PgOutput.Messages.CommitMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags Npgsql.Replication.PgOutput.Messages.CommitMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.CommitMessage.TransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.DefaultUpdateMessage Npgsql.Replication.PgOutput.Messages.DeleteMessage Npgsql.Replication.PgOutput.Messages.DeleteMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! -Npgsql.Replication.PgOutput.Messages.DeleteMessage.RelationId.get -> uint Npgsql.Replication.PgOutput.Messages.FullDeleteMessage Npgsql.Replication.PgOutput.Messages.FullDeleteMessage.OldRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! Npgsql.Replication.PgOutput.Messages.FullUpdateMessage @@ -879,7 +721,6 @@ Npgsql.Replication.PgOutput.Messages.IndexUpdateMessage.Key.get -> Npgsql.Replic Npgsql.Replication.PgOutput.Messages.InsertMessage Npgsql.Replication.PgOutput.Messages.InsertMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! 
Npgsql.Replication.PgOutput.Messages.InsertMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! -Npgsql.Replication.PgOutput.Messages.InsertMessage.RelationId.get -> uint Npgsql.Replication.PgOutput.Messages.KeyDeleteMessage Npgsql.Replication.PgOutput.Messages.KeyDeleteMessage.Key.get -> Npgsql.Replication.PgOutput.ReplicationTuple! Npgsql.Replication.PgOutput.Messages.LogicalDecodingMessage @@ -890,8 +731,21 @@ Npgsql.Replication.PgOutput.Messages.LogicalDecodingMessage.Prefix.get -> string Npgsql.Replication.PgOutput.Messages.OriginMessage Npgsql.Replication.PgOutput.Messages.OriginMessage.OriginCommitLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber Npgsql.Replication.PgOutput.Messages.OriginMessage.OriginName.get -> string! +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.PgOutputReplicationMessage Npgsql.Replication.PgOutput.Messages.PgOutputReplicationMessage.PgOutputReplicationMessage() -> void +Npgsql.Replication.PgOutput.Messages.PrepareMessage +Npgsql.Replication.PgOutput.Messages.PrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags +Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags +Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.TransactionPrepareTimestamp.get -> System.DateTime 
+Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage +Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage.TransactionGid.get -> string! Npgsql.Replication.PgOutput.Messages.RelationMessage Npgsql.Replication.PgOutput.Messages.RelationMessage.Column Npgsql.Replication.PgOutput.Messages.RelationMessage.Column.Column() -> void @@ -918,6 +772,14 @@ Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.DataTypeId.get -> uin Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.Flags.get -> byte Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.RelationMessageColumn() -> void Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.TypeModifier.get -> int +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.PreparedTransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionPrepareTimestamp.get -> System.DateTime +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionRollbackTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.StreamAbortMessage Npgsql.Replication.PgOutput.Messages.StreamAbortMessage.SubtransactionXid.get -> uint Npgsql.Replication.PgOutput.Messages.StreamCommitMessage @@ -925,15 +787,19 @@ Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.CommitLsn.get -> Npgsql 
Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.Flags.get -> byte Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.TransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags Npgsql.Replication.PgOutput.Messages.StreamStartMessage Npgsql.Replication.PgOutput.Messages.StreamStartMessage.StreamSegmentIndicator.get -> byte Npgsql.Replication.PgOutput.Messages.StreamStopMessage -Npgsql.Replication.PgOutput.Messages.TransactionalMessage -Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionalMessage() -> void -Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionXid.get -> uint? Npgsql.Replication.PgOutput.Messages.TransactionControlMessage Npgsql.Replication.PgOutput.Messages.TransactionControlMessage.TransactionControlMessage() -> void Npgsql.Replication.PgOutput.Messages.TransactionControlMessage.TransactionXid.get -> uint +Npgsql.Replication.PgOutput.Messages.TransactionalMessage +Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionXid.get -> uint? +Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionalMessage() -> void Npgsql.Replication.PgOutput.Messages.TruncateMessage Npgsql.Replication.PgOutput.Messages.TruncateMessage.Options.get -> Npgsql.Replication.PgOutput.Messages.TruncateMessage.TruncateOptions Npgsql.Replication.PgOutput.Messages.TruncateMessage.Relations.get -> System.Collections.Generic.IReadOnlyList! 
@@ -947,26 +813,38 @@ Npgsql.Replication.PgOutput.Messages.TypeMessage.Namespace.get -> string! Npgsql.Replication.PgOutput.Messages.TypeMessage.TypeId.get -> uint Npgsql.Replication.PgOutput.Messages.UpdateMessage Npgsql.Replication.PgOutput.Messages.UpdateMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! -Npgsql.Replication.PgOutput.Messages.UpdateMessage.RelationId.get -> uint +Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V1 = 1 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V2 = 2 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V3 = 3 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V4 = 4 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion Npgsql.Replication.PgOutput.PgOutputReplicationOptions Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Binary.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Equals(Npgsql.Replication.PgOutput.PgOutputReplicationOptions? other) -> bool Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Messages.get -> bool? -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> ulong +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! 
publicationNames, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PublicationNames.get -> System.Collections.Generic.List! -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Streaming.get -> bool? +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.StreamingMode.get -> Npgsql.Replication.PgOutput.PgOutputStreamingMode? +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.TwoPhase.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationSlot Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(Npgsql.Replication.PgOutput.PgOutputReplicationSlot! slot) -> void Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(Npgsql.Replication.ReplicationSlotOptions options) -> void Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(string! 
slotName) -> void +Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.Off = 0 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.On = 1 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode Npgsql.Replication.PgOutput.ReplicationTuple Npgsql.Replication.PgOutput.ReplicationTuple.NumColumns.get -> ushort Npgsql.Replication.PgOutput.ReplicationValue Npgsql.Replication.PgOutput.ReplicationValue.Get(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.Replication.PgOutput.ReplicationValue.Get(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.Replication.PgOutput.ReplicationValue.GetDataTypeName() -> string! +Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldType() -> System.Type! Npgsql.Replication.PgOutput.ReplicationValue.GetPostgresType() -> Npgsql.PostgresTypes.PostgresType! Npgsql.Replication.PgOutput.ReplicationValue.GetStream() -> System.IO.Stream! @@ -985,10 +863,14 @@ Npgsql.Replication.PhysicalReplicationConnection Npgsql.Replication.PhysicalReplicationConnection.CreateReplicationSlot(string! slotName, bool isTemporary = false, bool reserveWal = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.Replication.PhysicalReplicationConnection.PhysicalReplicationConnection() -> void Npgsql.Replication.PhysicalReplicationConnection.PhysicalReplicationConnection(string? connectionString) -> void +Npgsql.Replication.PhysicalReplicationConnection.ReadReplicationSlot(string! 
slotName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot! slot, System.Threading.CancellationToken cancellationToken) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationSlot -Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName) -> void +Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void +Npgsql.Replication.PhysicalReplicationSlot.RestartLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber? +Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? Npgsql.Replication.ReplicationConnection Npgsql.Replication.ReplicationConnection.CommandTimeout.get -> System.TimeSpan Npgsql.Replication.ReplicationConnection.CommandTimeout.set -> void @@ -1087,6 +969,8 @@ Npgsql.Schema.NpgsqlDbColumn.IsAliased.get -> bool? Npgsql.Schema.NpgsqlDbColumn.IsAliased.set -> void Npgsql.Schema.NpgsqlDbColumn.IsAutoIncrement.get -> bool? Npgsql.Schema.NpgsqlDbColumn.IsAutoIncrement.set -> void +Npgsql.Schema.NpgsqlDbColumn.IsIdentity.get -> bool? +Npgsql.Schema.NpgsqlDbColumn.IsIdentity.set -> void Npgsql.Schema.NpgsqlDbColumn.IsKey.get -> bool? 
Npgsql.Schema.NpgsqlDbColumn.IsKey.set -> void Npgsql.Schema.NpgsqlDbColumn.IsLong.get -> bool? @@ -1107,8 +991,8 @@ Npgsql.Schema.NpgsqlDbColumn.TypeOID.get -> uint Npgsql.Schema.NpgsqlDbColumn.UdtAssemblyQualifiedName.get -> string? Npgsql.Schema.NpgsqlDbColumn.UdtAssemblyQualifiedName.set -> void Npgsql.ServerCompatibilityMode -Npgsql.ServerCompatibilityMode.None = 0 -> Npgsql.ServerCompatibilityMode Npgsql.ServerCompatibilityMode.NoTypeLoading = 2 -> Npgsql.ServerCompatibilityMode +Npgsql.ServerCompatibilityMode.None = 0 -> Npgsql.ServerCompatibilityMode Npgsql.ServerCompatibilityMode.Redshift = 1 -> Npgsql.ServerCompatibilityMode Npgsql.SslMode Npgsql.SslMode.Allow = 1 -> Npgsql.SslMode @@ -1117,7 +1001,11 @@ Npgsql.SslMode.Prefer = 2 -> Npgsql.SslMode Npgsql.SslMode.Require = 3 -> Npgsql.SslMode Npgsql.SslMode.VerifyCA = 4 -> Npgsql.SslMode Npgsql.SslMode.VerifyFull = 5 -> Npgsql.SslMode +Npgsql.SslNegotiation +Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation +Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation Npgsql.StatementType +Npgsql.StatementType.Call = 11 -> Npgsql.StatementType Npgsql.StatementType.Copy = 8 -> Npgsql.StatementType Npgsql.StatementType.CreateTableAs = 5 -> Npgsql.StatementType Npgsql.StatementType.Delete = 3 -> Npgsql.StatementType @@ -1130,19 +1018,33 @@ Npgsql.StatementType.Select = 1 -> Npgsql.StatementType Npgsql.StatementType.Unknown = 0 -> Npgsql.StatementType Npgsql.StatementType.Update = 4 -> Npgsql.StatementType Npgsql.TypeMapping.INpgsqlTypeMapper -Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.AddDbTypeResolverFactory(Npgsql.Internal.DbTypeResolverFactory! factory) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! 
factory) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.set -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableRecordsAsTuples() -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableUnmappedTypes() -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.Reset() -> void Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.TypeMapping.UserTypeMapping +Npgsql.TypeMapping.UserTypeMapping.ClrType.get -> System.Type! +Npgsql.TypeMapping.UserTypeMapping.PgTypeName.get -> string! Npgsql.Util.NpgsqlTimeout Npgsql.Util.NpgsqlTimeout.NpgsqlTimeout() -> void NpgsqlTypes.NpgsqlBox NpgsqlTypes.NpgsqlBox.Bottom.get -> double +NpgsqlTypes.NpgsqlBox.Deconstruct(out NpgsqlTypes.NpgsqlPoint lowerLeft, out NpgsqlTypes.NpgsqlPoint upperRight) -> void +NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top) -> void +NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top, out double width, out double height) -> void NpgsqlTypes.NpgsqlBox.Equals(NpgsqlTypes.NpgsqlBox other) -> bool NpgsqlTypes.NpgsqlBox.Height.get -> double NpgsqlTypes.NpgsqlBox.IsEmpty.get -> bool @@ -1150,104 +1052,55 @@ NpgsqlTypes.NpgsqlBox.Left.get -> double NpgsqlTypes.NpgsqlBox.LowerLeft.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlBox.LowerLeft.set -> void NpgsqlTypes.NpgsqlBox.NpgsqlBox() -> void -NpgsqlTypes.NpgsqlBox.NpgsqlBox(double top, double right, double bottom, double left) -> void NpgsqlTypes.NpgsqlBox.NpgsqlBox(NpgsqlTypes.NpgsqlPoint upperRight, NpgsqlTypes.NpgsqlPoint lowerLeft) -> void +NpgsqlTypes.NpgsqlBox.NpgsqlBox(double top, double right, double bottom, double left) -> void NpgsqlTypes.NpgsqlBox.Right.get -> double NpgsqlTypes.NpgsqlBox.Top.get -> double NpgsqlTypes.NpgsqlBox.UpperRight.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlBox.UpperRight.set -> void NpgsqlTypes.NpgsqlBox.Width.get -> double +NpgsqlTypes.NpgsqlCidr +NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! +NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void +NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(System.Net.IPAddress! 
address, byte netmask) -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void NpgsqlTypes.NpgsqlCircle NpgsqlTypes.NpgsqlCircle.Center.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlCircle.Center.set -> void +NpgsqlTypes.NpgsqlCircle.Deconstruct(out NpgsqlTypes.NpgsqlPoint center, out double radius) -> void +NpgsqlTypes.NpgsqlCircle.Deconstruct(out double x, out double y, out double radius) -> void NpgsqlTypes.NpgsqlCircle.Equals(NpgsqlTypes.NpgsqlCircle other) -> bool NpgsqlTypes.NpgsqlCircle.NpgsqlCircle() -> void -NpgsqlTypes.NpgsqlCircle.NpgsqlCircle(double x, double y, double radius) -> void NpgsqlTypes.NpgsqlCircle.NpgsqlCircle(NpgsqlTypes.NpgsqlPoint center, double radius) -> void +NpgsqlTypes.NpgsqlCircle.NpgsqlCircle(double x, double y, double radius) -> void NpgsqlTypes.NpgsqlCircle.Radius.get -> double NpgsqlTypes.NpgsqlCircle.Radius.set -> void NpgsqlTypes.NpgsqlCircle.X.get -> double NpgsqlTypes.NpgsqlCircle.X.set -> void NpgsqlTypes.NpgsqlCircle.Y.get -> double NpgsqlTypes.NpgsqlCircle.Y.set -> void -NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.AddDays(int days) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.AddMonths(int months) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.AddYears(int years) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.Compare(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> int -NpgsqlTypes.NpgsqlDate.Compare(object? x, object? y) -> int -NpgsqlTypes.NpgsqlDate.CompareTo(NpgsqlTypes.NpgsqlDate other) -> int -NpgsqlTypes.NpgsqlDate.CompareTo(object? 
o) -> int -NpgsqlTypes.NpgsqlDate.Day.get -> int -NpgsqlTypes.NpgsqlDate.DayOfWeek.get -> System.DayOfWeek -NpgsqlTypes.NpgsqlDate.DayOfYear.get -> int -NpgsqlTypes.NpgsqlDate.Equals(NpgsqlTypes.NpgsqlDate other) -> bool -NpgsqlTypes.NpgsqlDate.IsFinite.get -> bool -NpgsqlTypes.NpgsqlDate.IsInfinity.get -> bool -NpgsqlTypes.NpgsqlDate.IsLeapYear.get -> bool -NpgsqlTypes.NpgsqlDate.IsNegativeInfinity.get -> bool -NpgsqlTypes.NpgsqlDate.Month.get -> int -NpgsqlTypes.NpgsqlDate.NpgsqlDate() -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(int year, int month, int day) -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(NpgsqlTypes.NpgsqlDate copyFrom) -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateOnly date) -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateTime dateTime) -> void -NpgsqlTypes.NpgsqlDate.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.Year.get -> int -NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Add(in NpgsqlTypes.NpgsqlTimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Add(System.TimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddDays(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddHours(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddMilliseconds(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddMinutes(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddMonths(int value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddSeconds(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddTicks(long value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddYears(int value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Compare(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> int -NpgsqlTypes.NpgsqlDateTime.Compare(object? x, object? 
y) -> int -NpgsqlTypes.NpgsqlDateTime.CompareTo(NpgsqlTypes.NpgsqlDateTime other) -> int -NpgsqlTypes.NpgsqlDateTime.CompareTo(object? o) -> int -NpgsqlTypes.NpgsqlDateTime.Date.get -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDateTime.Day.get -> int -NpgsqlTypes.NpgsqlDateTime.DayOfWeek.get -> System.DayOfWeek -NpgsqlTypes.NpgsqlDateTime.DayOfYear.get -> int -NpgsqlTypes.NpgsqlDateTime.Equals(NpgsqlTypes.NpgsqlDateTime other) -> bool -NpgsqlTypes.NpgsqlDateTime.Hour.get -> int -NpgsqlTypes.NpgsqlDateTime.IsFinite.get -> bool -NpgsqlTypes.NpgsqlDateTime.IsInfinity.get -> bool -NpgsqlTypes.NpgsqlDateTime.IsLeapYear.get -> bool -NpgsqlTypes.NpgsqlDateTime.IsNegativeInfinity.get -> bool -NpgsqlTypes.NpgsqlDateTime.Kind.get -> System.DateTimeKind -NpgsqlTypes.NpgsqlDateTime.Millisecond.get -> int -NpgsqlTypes.NpgsqlDateTime.Minute.get -> int -NpgsqlTypes.NpgsqlDateTime.Month.get -> int -NpgsqlTypes.NpgsqlDateTime.Normalize() -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime() -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, int milliseconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks, System.DateTimeKind kind) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date, System.TimeSpan time, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(System.DateTime dateTime) -> void -NpgsqlTypes.NpgsqlDateTime.Second.get -> int -NpgsqlTypes.NpgsqlDateTime.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> 
NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Subtract(NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlDateTime.Ticks.get -> long -NpgsqlTypes.NpgsqlDateTime.Time.get -> System.TimeSpan -NpgsqlTypes.NpgsqlDateTime.ToDateTime() -> System.DateTime -NpgsqlTypes.NpgsqlDateTime.ToLocalTime() -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.ToUniversalTime() -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Year.get -> int +NpgsqlTypes.NpgsqlCube +NpgsqlTypes.NpgsqlCube.Dimensions.get -> int +NpgsqlTypes.NpgsqlCube.Equals(NpgsqlTypes.NpgsqlCube other) -> bool +NpgsqlTypes.NpgsqlCube.IsPoint.get -> bool +NpgsqlTypes.NpgsqlCube.LowerLeft.get -> System.Collections.Generic.IReadOnlyList! +NpgsqlTypes.NpgsqlCube.NpgsqlCube() -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double coord) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double lowerLeft, double upperRight) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! coords) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! lowerLeft, System.Collections.Generic.IEnumerable! upperRight) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(double coord) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(double lowerLeft, double upperRight) -> void +NpgsqlTypes.NpgsqlCube.ToSubset(params int[]! indexes) -> NpgsqlTypes.NpgsqlCube +NpgsqlTypes.NpgsqlCube.UpperRight.get -> System.Collections.Generic.IReadOnlyList! 
NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Abstime = 33 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Array = -2147483648 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.Bigint = 1 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.BigIntMultirange = 536870913 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.BigIntRange = 1073741825 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Bigint = 1 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Bit = 25 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Boolean = 2 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Box = 3 -> NpgsqlTypes.NpgsqlDbType @@ -1257,6 +1110,7 @@ NpgsqlTypes.NpgsqlDbType.Cid = 43 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Cidr = 44 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Circle = 5 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Citext = 51 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Cube = 63 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Date = 7 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.DateMultirange = 536870919 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.DateRange = 1073741831 -> NpgsqlTypes.NpgsqlDbType @@ -1272,13 +1126,13 @@ NpgsqlTypes.NpgsqlDbType.IntegerRange = 1073741833 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.InternalChar = 38 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Interval = 30 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Json = 35 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.Jsonb = 36 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.JsonPath = 57 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.Line = 10 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Jsonb = 36 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LQuery = 61 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LSeg = 11 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LTree = 60 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LTxtQuery = 62 -> 
NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Line = 10 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.MacAddr = 34 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.MacAddr8 = 54 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Money = 12 -> NpgsqlTypes.NpgsqlDbType @@ -1302,15 +1156,13 @@ NpgsqlTypes.NpgsqlDbType.Smallint = 18 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Text = 19 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Tid = 53 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Time = 20 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.TimeTz = 31 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Timestamp = 21 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampMultirange = 536870933 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampRange = 1073741845 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.TimestampTZ = 26 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTz = 26 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTzMultirange = 536870938 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTzRange = 1073741850 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.TimeTZ = 31 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.TimeTz = 31 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TsQuery = 46 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TsVector = 45 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Unknown = 40 -> NpgsqlTypes.NpgsqlDbType @@ -1322,15 +1174,12 @@ NpgsqlTypes.NpgsqlDbType.Xid8 = 64 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Xml = 28 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlInet NpgsqlTypes.NpgsqlInet.Address.get -> System.Net.IPAddress! -NpgsqlTypes.NpgsqlInet.Address.set -> void -NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! 
address, out int netmask) -> void -NpgsqlTypes.NpgsqlInet.Equals(NpgsqlTypes.NpgsqlInet other) -> bool -NpgsqlTypes.NpgsqlInet.Netmask.get -> int -NpgsqlTypes.NpgsqlInet.Netmask.set -> void +NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void +NpgsqlTypes.NpgsqlInet.Netmask.get -> byte NpgsqlTypes.NpgsqlInet.NpgsqlInet() -> void -NpgsqlTypes.NpgsqlInet.NpgsqlInet(string! addr) -> void NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address) -> void -NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, int netmask) -> void +NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, byte netmask) -> void +NpgsqlTypes.NpgsqlInet.NpgsqlInet(string! addr) -> void NpgsqlTypes.NpgsqlInterval NpgsqlTypes.NpgsqlInterval.Days.get -> int NpgsqlTypes.NpgsqlInterval.Equals(NpgsqlTypes.NpgsqlInterval other) -> bool @@ -1338,6 +1187,16 @@ NpgsqlTypes.NpgsqlInterval.Months.get -> int NpgsqlTypes.NpgsqlInterval.NpgsqlInterval() -> void NpgsqlTypes.NpgsqlInterval.NpgsqlInterval(int months, int days, long time) -> void NpgsqlTypes.NpgsqlInterval.Time.get -> long +NpgsqlTypes.NpgsqlLSeg +NpgsqlTypes.NpgsqlLSeg.Deconstruct(out NpgsqlTypes.NpgsqlPoint start, out NpgsqlTypes.NpgsqlPoint end) -> void +NpgsqlTypes.NpgsqlLSeg.End.get -> NpgsqlTypes.NpgsqlPoint +NpgsqlTypes.NpgsqlLSeg.End.set -> void +NpgsqlTypes.NpgsqlLSeg.Equals(NpgsqlTypes.NpgsqlLSeg other) -> bool +NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg() -> void +NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(NpgsqlTypes.NpgsqlPoint start, NpgsqlTypes.NpgsqlPoint end) -> void +NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(double startx, double starty, double endx, double endy) -> void +NpgsqlTypes.NpgsqlLSeg.Start.get -> NpgsqlTypes.NpgsqlPoint +NpgsqlTypes.NpgsqlLSeg.Start.set -> void NpgsqlTypes.NpgsqlLine NpgsqlTypes.NpgsqlLine.A.get -> double NpgsqlTypes.NpgsqlLine.A.set -> void @@ -1345,6 +1204,7 @@ NpgsqlTypes.NpgsqlLine.B.get -> double NpgsqlTypes.NpgsqlLine.B.set -> void 
NpgsqlTypes.NpgsqlLine.C.get -> double NpgsqlTypes.NpgsqlLine.C.set -> void +NpgsqlTypes.NpgsqlLine.Deconstruct(out double a, out double b, out double c) -> void NpgsqlTypes.NpgsqlLine.Equals(NpgsqlTypes.NpgsqlLine other) -> bool NpgsqlTypes.NpgsqlLine.NpgsqlLine() -> void NpgsqlTypes.NpgsqlLine.NpgsqlLine(double a, double b, double c) -> void @@ -1353,15 +1213,6 @@ NpgsqlTypes.NpgsqlLogSequenceNumber.CompareTo(NpgsqlTypes.NpgsqlLogSequenceNumbe NpgsqlTypes.NpgsqlLogSequenceNumber.Equals(NpgsqlTypes.NpgsqlLogSequenceNumber other) -> bool NpgsqlTypes.NpgsqlLogSequenceNumber.NpgsqlLogSequenceNumber() -> void NpgsqlTypes.NpgsqlLogSequenceNumber.NpgsqlLogSequenceNumber(ulong value) -> void -NpgsqlTypes.NpgsqlLSeg -NpgsqlTypes.NpgsqlLSeg.End.get -> NpgsqlTypes.NpgsqlPoint -NpgsqlTypes.NpgsqlLSeg.End.set -> void -NpgsqlTypes.NpgsqlLSeg.Equals(NpgsqlTypes.NpgsqlLSeg other) -> bool -NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg() -> void -NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(double startx, double starty, double endx, double endy) -> void -NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(NpgsqlTypes.NpgsqlPoint start, NpgsqlTypes.NpgsqlPoint end) -> void -NpgsqlTypes.NpgsqlLSeg.Start.get -> NpgsqlTypes.NpgsqlPoint -NpgsqlTypes.NpgsqlLSeg.Start.set -> void NpgsqlTypes.NpgsqlPath NpgsqlTypes.NpgsqlPath.Add(NpgsqlTypes.NpgsqlPoint item) -> void NpgsqlTypes.NpgsqlPath.Capacity.get -> int @@ -1375,12 +1226,12 @@ NpgsqlTypes.NpgsqlPath.IndexOf(NpgsqlTypes.NpgsqlPoint item) -> int NpgsqlTypes.NpgsqlPath.Insert(int index, NpgsqlTypes.NpgsqlPoint item) -> void NpgsqlTypes.NpgsqlPath.IsReadOnly.get -> bool NpgsqlTypes.NpgsqlPath.NpgsqlPath() -> void +NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! points) -> void +NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! 
points, bool open) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(bool open) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(int capacity) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(int capacity, bool open) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(params NpgsqlTypes.NpgsqlPoint[]! points) -> void -NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! points) -> void -NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! points, bool open) -> void NpgsqlTypes.NpgsqlPath.Open.get -> bool NpgsqlTypes.NpgsqlPath.Open.set -> void NpgsqlTypes.NpgsqlPath.Remove(NpgsqlTypes.NpgsqlPoint item) -> bool @@ -1388,6 +1239,7 @@ NpgsqlTypes.NpgsqlPath.RemoveAt(int index) -> void NpgsqlTypes.NpgsqlPath.this[int index].get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlPath.this[int index].set -> void NpgsqlTypes.NpgsqlPoint +NpgsqlTypes.NpgsqlPoint.Deconstruct(out double x, out double y) -> void NpgsqlTypes.NpgsqlPoint.Equals(NpgsqlTypes.NpgsqlPoint other) -> bool NpgsqlTypes.NpgsqlPoint.NpgsqlPoint() -> void NpgsqlTypes.NpgsqlPoint.NpgsqlPoint(double x, double y) -> void @@ -1408,9 +1260,9 @@ NpgsqlTypes.NpgsqlPolygon.IndexOf(NpgsqlTypes.NpgsqlPoint item) -> int NpgsqlTypes.NpgsqlPolygon.Insert(int index, NpgsqlTypes.NpgsqlPoint item) -> void NpgsqlTypes.NpgsqlPolygon.IsReadOnly.get -> bool NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon() -> void +NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(System.Collections.Generic.IEnumerable! points) -> void NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(int capacity) -> void NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(params NpgsqlTypes.NpgsqlPoint[]! points) -> void -NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(System.Collections.Generic.IEnumerable! 
points) -> void NpgsqlTypes.NpgsqlPolygon.Remove(NpgsqlTypes.NpgsqlPoint item) -> bool NpgsqlTypes.NpgsqlPolygon.RemoveAt(int index) -> void NpgsqlTypes.NpgsqlPolygon.this[int index].get -> NpgsqlTypes.NpgsqlPoint @@ -1422,9 +1274,9 @@ NpgsqlTypes.NpgsqlRange.LowerBound.get -> T NpgsqlTypes.NpgsqlRange.LowerBoundInfinite.get -> bool NpgsqlTypes.NpgsqlRange.LowerBoundIsInclusive.get -> bool NpgsqlTypes.NpgsqlRange.NpgsqlRange() -> void -NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) -> void -NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, T upperBound, bool upperBoundIsInclusive) -> void NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, T upperBound) -> void +NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, T upperBound, bool upperBoundIsInclusive) -> void +NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) -> void NpgsqlTypes.NpgsqlRange.RangeTypeConverter NpgsqlTypes.NpgsqlRange.RangeTypeConverter.RangeTypeConverter() -> void NpgsqlTypes.NpgsqlRange.UpperBound.get -> T @@ -1432,50 +1284,11 @@ NpgsqlTypes.NpgsqlRange.UpperBoundInfinite.get -> bool NpgsqlTypes.NpgsqlRange.UpperBoundIsInclusive.get -> bool NpgsqlTypes.NpgsqlTid NpgsqlTypes.NpgsqlTid.BlockNumber.get -> uint +NpgsqlTypes.NpgsqlTid.Deconstruct(out uint blockNumber, out ushort offsetNumber) -> void NpgsqlTypes.NpgsqlTid.Equals(NpgsqlTypes.NpgsqlTid other) -> bool NpgsqlTypes.NpgsqlTid.NpgsqlTid() -> void NpgsqlTypes.NpgsqlTid.NpgsqlTid(uint blockNumber, ushort offsetNumber) -> void NpgsqlTypes.NpgsqlTid.OffsetNumber.get -> ushort -NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Canonicalize() -> 
NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.CompareTo(NpgsqlTypes.NpgsqlTimeSpan other) -> int -NpgsqlTypes.NpgsqlTimeSpan.CompareTo(object? other) -> int -NpgsqlTypes.NpgsqlTimeSpan.Days.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Duration() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Equals(NpgsqlTypes.NpgsqlTimeSpan other) -> bool -NpgsqlTypes.NpgsqlTimeSpan.Hours.get -> int -NpgsqlTypes.NpgsqlTimeSpan.JustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.JustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.JustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Microseconds.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Milliseconds.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Minutes.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Months.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Negate() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan() -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds, int milliseconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, long ticks) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int years, int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(long ticks) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(System.TimeSpan timespan) -> void -NpgsqlTypes.NpgsqlTimeSpan.Seconds.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Ticks.get -> long -NpgsqlTypes.NpgsqlTimeSpan.Time.get -> System.TimeSpan -NpgsqlTypes.NpgsqlTimeSpan.TotalDays.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalHours.get -> 
double -NpgsqlTypes.NpgsqlTimeSpan.TotalMicroseconds.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalMilliseconds.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalMinutes.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalMonths.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalSeconds.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalTicks.get -> long -NpgsqlTypes.NpgsqlTimeSpan.UnjustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.UnjustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.UnjustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan NpgsqlTypes.NpgsqlTsQuery NpgsqlTypes.NpgsqlTsQuery.Kind.get -> NpgsqlTypes.NpgsqlTsQuery.NodeKind NpgsqlTypes.NpgsqlTsQuery.NodeKind @@ -1498,9 +1311,9 @@ NpgsqlTypes.NpgsqlTsQueryBinOp.Right.set -> void NpgsqlTypes.NpgsqlTsQueryEmpty NpgsqlTypes.NpgsqlTsQueryEmpty.NpgsqlTsQueryEmpty() -> void NpgsqlTypes.NpgsqlTsQueryFollowedBy -NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> int +NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> short NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.set -> void -NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, int distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void +NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, short distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void NpgsqlTypes.NpgsqlTsQueryLexeme NpgsqlTypes.NpgsqlTsQueryLexeme.IsPrefixSearch.get -> bool NpgsqlTypes.NpgsqlTsQueryLexeme.IsPrefixSearch.set -> void @@ -1535,7 +1348,6 @@ NpgsqlTypes.NpgsqlTsVector.Lexeme.Lexeme(string! text) -> void NpgsqlTypes.NpgsqlTsVector.Lexeme.Lexeme(string! text, System.Collections.Generic.List? wordEntryPositions) -> void NpgsqlTypes.NpgsqlTsVector.Lexeme.Text.get -> string! 
NpgsqlTypes.NpgsqlTsVector.Lexeme.Text.set -> void -NpgsqlTypes.NpgsqlTsVector.Lexeme.this[int index].get -> NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight.A = 3 -> NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight.B = 2 -> NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight @@ -1547,10 +1359,253 @@ NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.Pos.get -> int NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.Weight.get -> NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.WordEntryPos() -> void NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.WordEntryPos(int pos, NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight weight = NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight.D) -> void +NpgsqlTypes.NpgsqlTsVector.Lexeme.this[int index].get -> NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos NpgsqlTypes.NpgsqlTsVector.this[int index].get -> NpgsqlTypes.NpgsqlTsVector.Lexeme NpgsqlTypes.PgNameAttribute NpgsqlTypes.PgNameAttribute.PgName.get -> string! NpgsqlTypes.PgNameAttribute.PgNameAttribute(string! pgName) -> void +abstract Npgsql.NpgsqlDataSource.Clear() -> void +abstract Npgsql.Replication.PgOutput.Messages.UpdateMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! +abstract NpgsqlTypes.NpgsqlTsQuery.Equals(NpgsqlTypes.NpgsqlTsQuery? other) -> bool +const Npgsql.NpgsqlConnection.DefaultPort = 5432 -> int +const Npgsql.PostgresErrorCodes.ActiveSqlTransaction = "25001" -> string! +const Npgsql.PostgresErrorCodes.AdminShutdown = "57P01" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousAlias = "42P09" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousColumn = "42702" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousFunction = "42725" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousParameter = "42P08" -> string! +const Npgsql.PostgresErrorCodes.ArraySubscriptError = "2202E" -> string! 
+const Npgsql.PostgresErrorCodes.AssertFailure = "P0004" -> string! +const Npgsql.PostgresErrorCodes.BadCopyFileFormat = "22P04" -> string! +const Npgsql.PostgresErrorCodes.BranchTransactionAlreadyActive = "25002" -> string! +const Npgsql.PostgresErrorCodes.CannotCoerce = "42846" -> string! +const Npgsql.PostgresErrorCodes.CannotConnectNow = "57P03" -> string! +const Npgsql.PostgresErrorCodes.CantChangeRuntimeParam = "55P02" -> string! +const Npgsql.PostgresErrorCodes.CardinalityViolation = "21000" -> string! +const Npgsql.PostgresErrorCodes.CaseNotFound = "20000" -> string! +const Npgsql.PostgresErrorCodes.CharacterNotInRepertoire = "22021" -> string! +const Npgsql.PostgresErrorCodes.CheckViolation = "23514" -> string! +const Npgsql.PostgresErrorCodes.CollationMismatch = "42P21" -> string! +const Npgsql.PostgresErrorCodes.ConfigFileError = "F0000" -> string! +const Npgsql.PostgresErrorCodes.ConfigurationLimitExceeded = "53400" -> string! +const Npgsql.PostgresErrorCodes.ConnectionDoesNotExist = "08003" -> string! +const Npgsql.PostgresErrorCodes.ConnectionException = "08000" -> string! +const Npgsql.PostgresErrorCodes.ConnectionFailure = "08006" -> string! +const Npgsql.PostgresErrorCodes.ContainingSqlNotPermittedExternalRoutineException = "38001" -> string! +const Npgsql.PostgresErrorCodes.CrashShutdown = "57P02" -> string! +const Npgsql.PostgresErrorCodes.DataCorrupted = "XX001" -> string! +const Npgsql.PostgresErrorCodes.DataException = "22000" -> string! +const Npgsql.PostgresErrorCodes.DatabaseDropped = "57P04" -> string! +const Npgsql.PostgresErrorCodes.DatatypeMismatch = "42804" -> string! +const Npgsql.PostgresErrorCodes.DatetimeFieldOverflow = "22008" -> string! +const Npgsql.PostgresErrorCodes.DeadlockDetected = "40P01" -> string! +const Npgsql.PostgresErrorCodes.DependentObjectsStillExist = "2BP01" -> string! +const Npgsql.PostgresErrorCodes.DependentPrivilegeDescriptorsStillExist = "2B000" -> string! 
+const Npgsql.PostgresErrorCodes.DeprecatedFeatureWarning = "01P01" -> string! +const Npgsql.PostgresErrorCodes.DiagnosticsException = "0Z000" -> string! +const Npgsql.PostgresErrorCodes.DiskFull = "53100" -> string! +const Npgsql.PostgresErrorCodes.DivisionByZero = "22012" -> string! +const Npgsql.PostgresErrorCodes.DuplicateAlias = "42712" -> string! +const Npgsql.PostgresErrorCodes.DuplicateColumn = "42701" -> string! +const Npgsql.PostgresErrorCodes.DuplicateCursor = "42P03" -> string! +const Npgsql.PostgresErrorCodes.DuplicateDatabase = "42P04" -> string! +const Npgsql.PostgresErrorCodes.DuplicateFile = "58P02" -> string! +const Npgsql.PostgresErrorCodes.DuplicateFunction = "42723" -> string! +const Npgsql.PostgresErrorCodes.DuplicateObject = "42710" -> string! +const Npgsql.PostgresErrorCodes.DuplicatePreparedStatement = "42P05" -> string! +const Npgsql.PostgresErrorCodes.DuplicateSchema = "42P06" -> string! +const Npgsql.PostgresErrorCodes.DuplicateTable = "42P07" -> string! +const Npgsql.PostgresErrorCodes.DynamicResultSetsReturnedWarning = "0100C" -> string! +const Npgsql.PostgresErrorCodes.ErrorInAssignment = "22005" -> string! +const Npgsql.PostgresErrorCodes.EscapeCharacterConflict = "2200B" -> string! +const Npgsql.PostgresErrorCodes.EventTriggerProtocolViolatedExternalRoutineInvocationException = "39P03" -> string! +const Npgsql.PostgresErrorCodes.ExclusionViolation = "23P01" -> string! +const Npgsql.PostgresErrorCodes.ExternalRoutineException = "38000" -> string! +const Npgsql.PostgresErrorCodes.ExternalRoutineInvocationException = "39000" -> string! +const Npgsql.PostgresErrorCodes.FdwColumnNameNotFound = "HV005" -> string! +const Npgsql.PostgresErrorCodes.FdwDynamicParameterValueNeeded = "HV002" -> string! +const Npgsql.PostgresErrorCodes.FdwError = "HV000" -> string! +const Npgsql.PostgresErrorCodes.FdwFunctionSequenceError = "HV010" -> string! +const Npgsql.PostgresErrorCodes.FdwInconsistentDescriptorInformation = "HV021" -> string! 
+const Npgsql.PostgresErrorCodes.FdwInvalidAttributeValue = "HV024" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidColumnName = "HV007" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidColumnNumber = "HV008" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidDataType = "HV004" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidDataTypeDescriptors = "HV006" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidDescriptorFieldIdentifier = "HV091" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidHandle = "HV00B" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidOptionIndex = "HV00C" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidOptionName = "HV00D" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidStringFormat = "HV00A" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidStringLengthOrBufferLength = "HV090" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidUseOfNullPointer = "HV009" -> string! +const Npgsql.PostgresErrorCodes.FdwNoSchemas = "HV00P" -> string! +const Npgsql.PostgresErrorCodes.FdwOptionNameNotFound = "HV00J" -> string! +const Npgsql.PostgresErrorCodes.FdwOutOfMemory = "HV001" -> string! +const Npgsql.PostgresErrorCodes.FdwReplyHandle = "HV00K" -> string! +const Npgsql.PostgresErrorCodes.FdwSchemaNotFound = "HV00Q" -> string! +const Npgsql.PostgresErrorCodes.FdwTableNotFound = "HV00R" -> string! +const Npgsql.PostgresErrorCodes.FdwTooManyHandles = "HV014" -> string! +const Npgsql.PostgresErrorCodes.FdwUnableToCreateExecution = "HV00L" -> string! +const Npgsql.PostgresErrorCodes.FdwUnableToCreateReply = "HV00M" -> string! +const Npgsql.PostgresErrorCodes.FdwUnableToEstablishConnection = "HV00N" -> string! +const Npgsql.PostgresErrorCodes.FeatureNotSupported = "0A000" -> string! +const Npgsql.PostgresErrorCodes.FloatingPointException = "22P01" -> string! +const Npgsql.PostgresErrorCodes.ForeignKeyViolation = "23503" -> string! 
+const Npgsql.PostgresErrorCodes.FunctionExecutedNoReturnStatementSqlRoutineException = "2F005" -> string! +const Npgsql.PostgresErrorCodes.GroupingError = "42803" -> string! +const Npgsql.PostgresErrorCodes.HeldCursorRequiresSameIsolationLevel = "25008" -> string! +const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! +const Npgsql.PostgresErrorCodes.ImplicitZeroBitPaddingWarning = "01008" -> string! +const Npgsql.PostgresErrorCodes.InFailedSqlTransaction = "25P02" -> string! +const Npgsql.PostgresErrorCodes.InappropriateAccessModeForBranchTransaction = "25003" -> string! +const Npgsql.PostgresErrorCodes.InappropriateIsolationLevelForBranchTransaction = "25004" -> string! +const Npgsql.PostgresErrorCodes.IndeterminateCollation = "42P22" -> string! +const Npgsql.PostgresErrorCodes.IndeterminateDatatype = "42P18" -> string! +const Npgsql.PostgresErrorCodes.IndexCorrupted = "XX002" -> string! +const Npgsql.PostgresErrorCodes.IndicatorOverflow = "22022" -> string! +const Npgsql.PostgresErrorCodes.InsufficientPrivilege = "42501" -> string! +const Npgsql.PostgresErrorCodes.InsufficientResources = "53000" -> string! +const Npgsql.PostgresErrorCodes.IntegrityConstraintViolation = "23000" -> string! +const Npgsql.PostgresErrorCodes.InternalError = "XX000" -> string! +const Npgsql.PostgresErrorCodes.IntervalFieldOverflow = "22015" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForLogarithm = "2201E" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForNthValueFunction = "22016" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForNtileFunction = "22014" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForPowerFunction = "2201F" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForWidthBucketFunction = "2201G" -> string! +const Npgsql.PostgresErrorCodes.InvalidAuthorizationSpecification = "28000" -> string! +const Npgsql.PostgresErrorCodes.InvalidBinaryRepresentation = "22P03" -> string! 
+const Npgsql.PostgresErrorCodes.InvalidCatalogName = "3D000" -> string! +const Npgsql.PostgresErrorCodes.InvalidCharacterValueForCast = "22018" -> string! +const Npgsql.PostgresErrorCodes.InvalidColumnDefinition = "42611" -> string! +const Npgsql.PostgresErrorCodes.InvalidColumnReference = "42P10" -> string! +const Npgsql.PostgresErrorCodes.InvalidCursorDefinition = "42P11" -> string! +const Npgsql.PostgresErrorCodes.InvalidCursorName = "34000" -> string! +const Npgsql.PostgresErrorCodes.InvalidCursorState = "24000" -> string! +const Npgsql.PostgresErrorCodes.InvalidDatabaseDefinition = "42P12" -> string! +const Npgsql.PostgresErrorCodes.InvalidDatetimeFormat = "22007" -> string! +const Npgsql.PostgresErrorCodes.InvalidEscapeCharacter = "22019" -> string! +const Npgsql.PostgresErrorCodes.InvalidEscapeOctet = "2200D" -> string! +const Npgsql.PostgresErrorCodes.InvalidEscapeSequence = "22025" -> string! +const Npgsql.PostgresErrorCodes.InvalidForeignKey = "42830" -> string! +const Npgsql.PostgresErrorCodes.InvalidFunctionDefinition = "42P13" -> string! +const Npgsql.PostgresErrorCodes.InvalidGrantOperation = "0LP01" -> string! +const Npgsql.PostgresErrorCodes.InvalidGrantor = "0L000" -> string! +const Npgsql.PostgresErrorCodes.InvalidIndicatorParameterValue = "22010" -> string! +const Npgsql.PostgresErrorCodes.InvalidLocatorSpecification = "0F001" -> string! +const Npgsql.PostgresErrorCodes.InvalidName = "42602" -> string! +const Npgsql.PostgresErrorCodes.InvalidObjectDefinition = "42P17" -> string! +const Npgsql.PostgresErrorCodes.InvalidParameterValue = "22023" -> string! +const Npgsql.PostgresErrorCodes.InvalidPassword = "28P01" -> string! +const Npgsql.PostgresErrorCodes.InvalidPreparedStatementDefinition = "42P14" -> string! +const Npgsql.PostgresErrorCodes.InvalidRecursion = "42P19" -> string! +const Npgsql.PostgresErrorCodes.InvalidRegularExpression = "2201B" -> string! +const Npgsql.PostgresErrorCodes.InvalidRoleSpecification = "0P000" -> string! 
+const Npgsql.PostgresErrorCodes.InvalidRowCountInLimitClause = "2201W" -> string! +const Npgsql.PostgresErrorCodes.InvalidRowCountInResultOffsetClause = "2201X" -> string! +const Npgsql.PostgresErrorCodes.InvalidSavepointSpecification = "3B001" -> string! +const Npgsql.PostgresErrorCodes.InvalidSchemaDefinition = "42P15" -> string! +const Npgsql.PostgresErrorCodes.InvalidSchemaName = "3F000" -> string! +const Npgsql.PostgresErrorCodes.InvalidSqlStatementName = "26000" -> string! +const Npgsql.PostgresErrorCodes.InvalidSqlstateReturnedExternalRoutineInvocationException = "39001" -> string! +const Npgsql.PostgresErrorCodes.InvalidTableDefinition = "42P16" -> string! +const Npgsql.PostgresErrorCodes.InvalidTablesampleArgument = "2202H" -> string! +const Npgsql.PostgresErrorCodes.InvalidTablesampleRepeat = "2202G" -> string! +const Npgsql.PostgresErrorCodes.InvalidTextRepresentation = "22P02" -> string! +const Npgsql.PostgresErrorCodes.InvalidTimeZoneDisplacementValue = "22009" -> string! +const Npgsql.PostgresErrorCodes.InvalidTransactionInitiation = "0B000" -> string! +const Npgsql.PostgresErrorCodes.InvalidTransactionState = "25000" -> string! +const Npgsql.PostgresErrorCodes.InvalidTransactionTermination = "2D000" -> string! +const Npgsql.PostgresErrorCodes.InvalidUseOfEscapeCharacter = "2200C" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlComment = "2200S" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlContent = "2200N" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlDocument = "2200M" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlProcessingInstruction = "2200T" -> string! +const Npgsql.PostgresErrorCodes.IoError = "58030" -> string! +const Npgsql.PostgresErrorCodes.LocatorException = "0F000" -> string! +const Npgsql.PostgresErrorCodes.LockFileExists = "F0001" -> string! +const Npgsql.PostgresErrorCodes.LockNotAvailable = "55P03" -> string! 
+const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedExternalRoutineException = "38002" -> string! +const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedSqlRoutineException = "2F002" -> string! +const Npgsql.PostgresErrorCodes.MostSpecificTypeMismatch = "2200G" -> string! +const Npgsql.PostgresErrorCodes.NameTooLong = "42622" -> string! +const Npgsql.PostgresErrorCodes.NoActiveSqlTransaction = "25P01" -> string! +const Npgsql.PostgresErrorCodes.NoActiveSqlTransactionForBranchTransaction = "25005" -> string! +const Npgsql.PostgresErrorCodes.NoAdditionalDynamicResultSetsReturned = "02001" -> string! +const Npgsql.PostgresErrorCodes.NoData = "02000" -> string! +const Npgsql.PostgresErrorCodes.NoDataFound = "P0002" -> string! +const Npgsql.PostgresErrorCodes.NonstandardUseOfEscapeCharacter = "22P06" -> string! +const Npgsql.PostgresErrorCodes.NotAnXmlDocument = "2200L" -> string! +const Npgsql.PostgresErrorCodes.NotNullViolation = "23502" -> string! +const Npgsql.PostgresErrorCodes.NullValueEliminatedInSetFunctionWarning = "01003" -> string! +const Npgsql.PostgresErrorCodes.NullValueNoIndicatorParameter = "22002" -> string! +const Npgsql.PostgresErrorCodes.NullValueNotAllowed = "22004" -> string! +const Npgsql.PostgresErrorCodes.NullValueNotAllowedExternalRoutineInvocationException = "39004" -> string! +const Npgsql.PostgresErrorCodes.NumericValueOutOfRange = "22003" -> string! +const Npgsql.PostgresErrorCodes.ObjectInUse = "55006" -> string! +const Npgsql.PostgresErrorCodes.ObjectNotInPrerequisiteState = "55000" -> string! +const Npgsql.PostgresErrorCodes.OperatorIntervention = "57000" -> string! +const Npgsql.PostgresErrorCodes.OutOfMemory = "53200" -> string! +const Npgsql.PostgresErrorCodes.PlpgsqlError = "P0000" -> string! +const Npgsql.PostgresErrorCodes.PrivilegeNotGrantedWarning = "01007" -> string! +const Npgsql.PostgresErrorCodes.PrivilegeNotRevokedWarning = "01006" -> string! 
+const Npgsql.PostgresErrorCodes.ProgramLimitExceeded = "54000" -> string! +const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedExternalRoutineException = "38003" -> string! +const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedSqlRoutineException = "2F003" -> string! +const Npgsql.PostgresErrorCodes.ProtocolViolation = "08P01" -> string! +const Npgsql.PostgresErrorCodes.QueryCanceled = "57014" -> string! +const Npgsql.PostgresErrorCodes.RaiseException = "P0001" -> string! +const Npgsql.PostgresErrorCodes.ReadOnlySqlTransaction = "25006" -> string! +const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedExternalRoutineException = "38004" -> string! +const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedSqlRoutineException = "2F004" -> string! +const Npgsql.PostgresErrorCodes.ReservedName = "42939" -> string! +const Npgsql.PostgresErrorCodes.RestrictViolation = "23001" -> string! +const Npgsql.PostgresErrorCodes.SavepointException = "3B000" -> string! +const Npgsql.PostgresErrorCodes.SchemaAndDataStatementMixingNotSupported = "25007" -> string! +const Npgsql.PostgresErrorCodes.SerializationFailure = "40001" -> string! +const Npgsql.PostgresErrorCodes.SnapshotFailure = "72000" -> string! +const Npgsql.PostgresErrorCodes.SqlClientUnableToEstablishSqlConnection = "08001" -> string! +const Npgsql.PostgresErrorCodes.SqlRoutineException = "2F000" -> string! +const Npgsql.PostgresErrorCodes.SqlServerRejectedEstablishmentOfSqlConnection = "08004" -> string! +const Npgsql.PostgresErrorCodes.SqlStatementNotYetComplete = "03000" -> string! +const Npgsql.PostgresErrorCodes.SrfProtocolViolatedExternalRoutineInvocationException = "39P02" -> string! +const Npgsql.PostgresErrorCodes.StackedDiagnosticsAccessedWithoutActiveHandler = "0Z002" -> string! +const Npgsql.PostgresErrorCodes.StatementCompletionUnknown = "40003" -> string! +const Npgsql.PostgresErrorCodes.StatementTooComplex = "54001" -> string! 
+const Npgsql.PostgresErrorCodes.StringDataLengthMismatch = "22026" -> string! +const Npgsql.PostgresErrorCodes.StringDataRightTruncation = "22001" -> string! +const Npgsql.PostgresErrorCodes.StringDataRightTruncationWarning = "01004" -> string! +const Npgsql.PostgresErrorCodes.SubstringError = "22011" -> string! +const Npgsql.PostgresErrorCodes.SuccessfulCompletion = "00000" -> string! +const Npgsql.PostgresErrorCodes.SyntaxError = "42601" -> string! +const Npgsql.PostgresErrorCodes.SyntaxErrorOrAccessRuleViolation = "42000" -> string! +const Npgsql.PostgresErrorCodes.SystemError = "58000" -> string! +const Npgsql.PostgresErrorCodes.TooManyArguments = "54023" -> string! +const Npgsql.PostgresErrorCodes.TooManyColumns = "54011" -> string! +const Npgsql.PostgresErrorCodes.TooManyConnections = "53300" -> string! +const Npgsql.PostgresErrorCodes.TooManyRows = "P0003" -> string! +const Npgsql.PostgresErrorCodes.TransactionIntegrityConstraintViolation = "40002" -> string! +const Npgsql.PostgresErrorCodes.TransactionResolutionUnknown = "08007" -> string! +const Npgsql.PostgresErrorCodes.TransactionRollback = "40000" -> string! +const Npgsql.PostgresErrorCodes.TriggerProtocolViolatedExternalRoutineInvocationException = "39P01" -> string! +const Npgsql.PostgresErrorCodes.TriggeredActionException = "09000" -> string! +const Npgsql.PostgresErrorCodes.TriggeredDataChangeViolation = "27000" -> string! +const Npgsql.PostgresErrorCodes.TrimError = "22027" -> string! +const Npgsql.PostgresErrorCodes.UndefinedColumn = "42703" -> string! +const Npgsql.PostgresErrorCodes.UndefinedFile = "58P01" -> string! +const Npgsql.PostgresErrorCodes.UndefinedFunction = "42883" -> string! +const Npgsql.PostgresErrorCodes.UndefinedObject = "42704" -> string! +const Npgsql.PostgresErrorCodes.UndefinedParameter = "42P02" -> string! +const Npgsql.PostgresErrorCodes.UndefinedTable = "42P01" -> string! +const Npgsql.PostgresErrorCodes.UniqueViolation = "23505" -> string! 
+const Npgsql.PostgresErrorCodes.UnterminatedCString = "22024" -> string! +const Npgsql.PostgresErrorCodes.UntranslatableCharacter = "22P05" -> string! +const Npgsql.PostgresErrorCodes.Warning = "01000" -> string! +const Npgsql.PostgresErrorCodes.WindowingError = "42P20" -> string! +const Npgsql.PostgresErrorCodes.WithCheckOptionViolation = "44000" -> string! +const Npgsql.PostgresErrorCodes.WrongObjectType = "42809" -> string! +const Npgsql.PostgresErrorCodes.ZeroLengthCharacterString = "2200F" -> string! override Npgsql.BackendMessages.FieldDescription.ToString() -> string! override Npgsql.NpgsqlBatch.Cancel() -> void override Npgsql.NpgsqlBatch.CreateDbBatchCommand() -> System.Data.Common.DbBatchCommand! @@ -1559,6 +1614,7 @@ override Npgsql.NpgsqlBatch.DbConnection.get -> System.Data.Common.DbConnection? override Npgsql.NpgsqlBatch.DbConnection.set -> void override Npgsql.NpgsqlBatch.DbTransaction.get -> System.Data.Common.DbTransaction? override Npgsql.NpgsqlBatch.DbTransaction.set -> void +override Npgsql.NpgsqlBatch.Dispose() -> void override Npgsql.NpgsqlBatch.ExecuteDbDataReader(System.Data.CommandBehavior behavior) -> System.Data.Common.DbDataReader! override Npgsql.NpgsqlBatch.ExecuteDbDataReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlBatch.ExecuteNonQuery() -> int @@ -1569,10 +1625,12 @@ override Npgsql.NpgsqlBatch.Prepare() -> void override Npgsql.NpgsqlBatch.PrepareAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlBatch.Timeout.get -> int override Npgsql.NpgsqlBatch.Timeout.set -> void +override Npgsql.NpgsqlBatchCommand.CanCreateParameter.get -> bool override Npgsql.NpgsqlBatchCommand.CommandText.get -> string! 
override Npgsql.NpgsqlBatchCommand.CommandText.set -> void override Npgsql.NpgsqlBatchCommand.CommandType.get -> System.Data.CommandType override Npgsql.NpgsqlBatchCommand.CommandType.set -> void +override Npgsql.NpgsqlBatchCommand.CreateParameter() -> Npgsql.NpgsqlParameter! override Npgsql.NpgsqlBatchCommand.RecordsAffected.get -> int override Npgsql.NpgsqlBatchCommand.ToString() -> string! override Npgsql.NpgsqlBatchCommandCollection.Add(System.Data.Common.DbBatchCommand! item) -> void @@ -1593,8 +1651,17 @@ override Npgsql.NpgsqlCommand.CommandTimeout.get -> int override Npgsql.NpgsqlCommand.CommandTimeout.set -> void override Npgsql.NpgsqlCommand.CommandType.get -> System.Data.CommandType override Npgsql.NpgsqlCommand.CommandType.set -> void +override Npgsql.NpgsqlCommand.CreateDbParameter() -> System.Data.Common.DbParameter! +override Npgsql.NpgsqlCommand.DbConnection.get -> System.Data.Common.DbConnection? +override Npgsql.NpgsqlCommand.DbConnection.set -> void +override Npgsql.NpgsqlCommand.DbParameterCollection.get -> System.Data.Common.DbParameterCollection! +override Npgsql.NpgsqlCommand.DbTransaction.get -> System.Data.Common.DbTransaction? +override Npgsql.NpgsqlCommand.DbTransaction.set -> void override Npgsql.NpgsqlCommand.DesignTimeVisible.get -> bool override Npgsql.NpgsqlCommand.DesignTimeVisible.set -> void +override Npgsql.NpgsqlCommand.Dispose(bool disposing) -> void +override Npgsql.NpgsqlCommand.ExecuteDbDataReader(System.Data.CommandBehavior behavior) -> System.Data.Common.DbDataReader! +override Npgsql.NpgsqlCommand.ExecuteDbDataReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlCommand.ExecuteNonQuery() -> int override Npgsql.NpgsqlCommand.ExecuteNonQueryAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlCommand.ExecuteScalar() -> object? 
@@ -1616,16 +1683,16 @@ override Npgsql.NpgsqlConnection.CloseAsync() -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.ConnectionString.get -> string! override Npgsql.NpgsqlConnection.ConnectionString.set -> void override Npgsql.NpgsqlConnection.ConnectionTimeout.get -> int -override Npgsql.NpgsqlConnection.Database.get -> string! override Npgsql.NpgsqlConnection.DataSource.get -> string! +override Npgsql.NpgsqlConnection.Database.get -> string! override Npgsql.NpgsqlConnection.DisposeAsync() -> System.Threading.Tasks.ValueTask override Npgsql.NpgsqlConnection.EnlistTransaction(System.Transactions.Transaction? transaction) -> void override Npgsql.NpgsqlConnection.GetSchema() -> System.Data.DataTable! override Npgsql.NpgsqlConnection.GetSchema(string? collectionName) -> System.Data.DataTable! override Npgsql.NpgsqlConnection.GetSchema(string? collectionName, string?[]? restrictions) -> System.Data.DataTable! -override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, string?[]? restrictions, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.GetSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, string?[]? restrictions, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlConnection.Open() -> void override Npgsql.NpgsqlConnection.OpenAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.ServerVersion.get -> string! @@ -1635,9 +1702,9 @@ override Npgsql.NpgsqlConnectionStringBuilder.ContainsKey(string! keyword) -> bo override Npgsql.NpgsqlConnectionStringBuilder.Equals(object? obj) -> bool override Npgsql.NpgsqlConnectionStringBuilder.GetHashCode() -> int override Npgsql.NpgsqlConnectionStringBuilder.Remove(string! keyword) -> bool +override Npgsql.NpgsqlConnectionStringBuilder.TryGetValue(string! keyword, out object? value) -> bool override Npgsql.NpgsqlConnectionStringBuilder.this[string! keyword].get -> object! override Npgsql.NpgsqlConnectionStringBuilder.this[string! keyword].set -> void -override Npgsql.NpgsqlConnectionStringBuilder.TryGetValue(string! keyword, out object? value) -> bool override Npgsql.NpgsqlDataReader.Close() -> void override Npgsql.NpgsqlDataReader.CloseAsync() -> System.Threading.Tasks.Task! override Npgsql.NpgsqlDataReader.Depth.get -> int @@ -1648,6 +1715,7 @@ override Npgsql.NpgsqlDataReader.GetByte(int ordinal) -> byte override Npgsql.NpgsqlDataReader.GetBytes(int ordinal, long dataOffset, byte[]? buffer, int bufferOffset, int length) -> long override Npgsql.NpgsqlDataReader.GetChar(int ordinal) -> char override Npgsql.NpgsqlDataReader.GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length) -> long +override Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! override Npgsql.NpgsqlDataReader.GetDataTypeName(int ordinal) -> string! 
override Npgsql.NpgsqlDataReader.GetDateTime(int ordinal) -> System.DateTime override Npgsql.NpgsqlDataReader.GetDecimal(int ordinal) -> decimal @@ -1663,9 +1731,6 @@ override Npgsql.NpgsqlDataReader.GetInt32(int ordinal) -> int override Npgsql.NpgsqlDataReader.GetInt64(int ordinal) -> long override Npgsql.NpgsqlDataReader.GetName(int ordinal) -> string! override Npgsql.NpgsqlDataReader.GetOrdinal(string! name) -> int -override Npgsql.NpgsqlDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! -override Npgsql.NpgsqlDataReader.GetProviderSpecificValue(int ordinal) -> object! -override Npgsql.NpgsqlDataReader.GetProviderSpecificValues(object![]! values) -> int override Npgsql.NpgsqlDataReader.GetSchemaTable() -> System.Data.DataTable? override Npgsql.NpgsqlDataReader.GetSchemaTableAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlDataReader.GetStream(int ordinal) -> System.IO.Stream! @@ -1684,6 +1749,7 @@ override Npgsql.NpgsqlDataReader.ReadAsync(System.Threading.CancellationToken ca override Npgsql.NpgsqlDataReader.RecordsAffected.get -> int override Npgsql.NpgsqlDataReader.this[int ordinal].get -> object! override Npgsql.NpgsqlDataReader.this[string! name].get -> object! +override Npgsql.NpgsqlDataSource.ConnectionString.get -> string! override Npgsql.NpgsqlException.DbBatchCommand.get -> System.Data.Common.DbBatchCommand? override Npgsql.NpgsqlException.IsTransient.get -> bool override Npgsql.NpgsqlFactory.CanCreateBatch.get -> bool @@ -1696,6 +1762,7 @@ override Npgsql.NpgsqlFactory.CreateCommandBuilder() -> System.Data.Common.DbCom override Npgsql.NpgsqlFactory.CreateConnection() -> System.Data.Common.DbConnection! override Npgsql.NpgsqlFactory.CreateConnectionStringBuilder() -> System.Data.Common.DbConnectionStringBuilder! override Npgsql.NpgsqlFactory.CreateDataAdapter() -> System.Data.Common.DbDataAdapter! 
+override Npgsql.NpgsqlFactory.CreateDataSource(string! connectionString) -> System.Data.Common.DbDataSource! override Npgsql.NpgsqlFactory.CreateParameter() -> System.Data.Common.DbParameter! override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool @@ -1712,6 +1779,7 @@ override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin o override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void override Npgsql.NpgsqlNestedDataReader.Close() -> void override Npgsql.NpgsqlNestedDataReader.Depth.get -> int override Npgsql.NpgsqlNestedDataReader.FieldCount.get -> int @@ -1734,9 +1802,6 @@ override Npgsql.NpgsqlNestedDataReader.GetInt32(int ordinal) -> int override Npgsql.NpgsqlNestedDataReader.GetInt64(int ordinal) -> long override Npgsql.NpgsqlNestedDataReader.GetName(int ordinal) -> string! override Npgsql.NpgsqlNestedDataReader.GetOrdinal(string! name) -> int -override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! -override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValue(int ordinal) -> object! -override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValues(object![]! values) -> int override Npgsql.NpgsqlNestedDataReader.GetString(int ordinal) -> string! override Npgsql.NpgsqlNestedDataReader.GetValue(int ordinal) -> object! override Npgsql.NpgsqlNestedDataReader.GetValues(object![]! 
values) -> int @@ -1781,18 +1846,18 @@ override Npgsql.NpgsqlRawCopyStream.FlushAsync(System.Threading.CancellationToke override Npgsql.NpgsqlRawCopyStream.Length.get -> long override Npgsql.NpgsqlRawCopyStream.Position.get -> long override Npgsql.NpgsqlRawCopyStream.Position.set -> void -override Npgsql.NpgsqlRawCopyStream.Read(byte[]! buffer, int offset, int count) -> int override Npgsql.NpgsqlRawCopyStream.Read(System.Span span) -> int -override Npgsql.NpgsqlRawCopyStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlRawCopyStream.Read(byte[]! buffer, int offset, int count) -> int override Npgsql.NpgsqlRawCopyStream.ReadAsync(System.Memory buffer, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.ValueTask +override Npgsql.NpgsqlRawCopyStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlRawCopyStream.ReadTimeout.get -> int override Npgsql.NpgsqlRawCopyStream.ReadTimeout.set -> void override Npgsql.NpgsqlRawCopyStream.Seek(long offset, System.IO.SeekOrigin origin) -> long override Npgsql.NpgsqlRawCopyStream.SetLength(long value) -> void -override Npgsql.NpgsqlRawCopyStream.Write(byte[]! buffer, int offset, int count) -> void override Npgsql.NpgsqlRawCopyStream.Write(System.ReadOnlySpan buffer) -> void -override Npgsql.NpgsqlRawCopyStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlRawCopyStream.Write(byte[]! 
buffer, int offset, int count) -> void override Npgsql.NpgsqlRawCopyStream.WriteAsync(System.ReadOnlyMemory buffer, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask +override Npgsql.NpgsqlRawCopyStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlRawCopyStream.WriteTimeout.get -> int override Npgsql.NpgsqlRawCopyStream.WriteTimeout.set -> void override Npgsql.NpgsqlTransaction.Commit() -> void @@ -1803,10 +1868,11 @@ override Npgsql.NpgsqlTransaction.Release(string! name) -> void override Npgsql.NpgsqlTransaction.ReleaseAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.Rollback() -> void override Npgsql.NpgsqlTransaction.Rollback(string! name) -> void -override Npgsql.NpgsqlTransaction.RollbackAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.RollbackAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlTransaction.RollbackAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.Save(string! name) -> void override Npgsql.NpgsqlTransaction.SaveAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlTransaction.SupportsSavepoints.get -> bool override Npgsql.PostgresException.GetObjectData(System.Runtime.Serialization.SerializationInfo! 
info, System.Runtime.Serialization.StreamingContext context) -> void override Npgsql.PostgresException.IsTransient.get -> bool override Npgsql.PostgresException.SqlState.get -> string! @@ -1826,29 +1892,25 @@ override Npgsql.Schema.NpgsqlDbColumn.this[string! propertyName].get -> object? override NpgsqlTypes.NpgsqlBox.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlBox.GetHashCode() -> int override NpgsqlTypes.NpgsqlBox.ToString() -> string! +override NpgsqlTypes.NpgsqlCidr.ToString() -> string! override NpgsqlTypes.NpgsqlCircle.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCircle.GetHashCode() -> int override NpgsqlTypes.NpgsqlCircle.ToString() -> string! -override NpgsqlTypes.NpgsqlDate.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlDate.GetHashCode() -> int -override NpgsqlTypes.NpgsqlDate.ToString() -> string! -override NpgsqlTypes.NpgsqlDateTime.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlDateTime.GetHashCode() -> int -override NpgsqlTypes.NpgsqlDateTime.ToString() -> string! -override NpgsqlTypes.NpgsqlInet.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int +override NpgsqlTypes.NpgsqlCube.Equals(object? obj) -> bool +override NpgsqlTypes.NpgsqlCube.GetHashCode() -> int +override NpgsqlTypes.NpgsqlCube.ToString() -> string! override NpgsqlTypes.NpgsqlInet.ToString() -> string! override NpgsqlTypes.NpgsqlInterval.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlInterval.GetHashCode() -> int +override NpgsqlTypes.NpgsqlLSeg.Equals(object? obj) -> bool +override NpgsqlTypes.NpgsqlLSeg.GetHashCode() -> int +override NpgsqlTypes.NpgsqlLSeg.ToString() -> string! override NpgsqlTypes.NpgsqlLine.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlLine.GetHashCode() -> int override NpgsqlTypes.NpgsqlLine.ToString() -> string! override NpgsqlTypes.NpgsqlLogSequenceNumber.Equals(object? 
obj) -> bool override NpgsqlTypes.NpgsqlLogSequenceNumber.GetHashCode() -> int override NpgsqlTypes.NpgsqlLogSequenceNumber.ToString() -> string! -override NpgsqlTypes.NpgsqlLSeg.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlLSeg.GetHashCode() -> int -override NpgsqlTypes.NpgsqlLSeg.ToString() -> string! override NpgsqlTypes.NpgsqlPath.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlPath.GetHashCode() -> int override NpgsqlTypes.NpgsqlPath.ToString() -> string! @@ -1868,9 +1930,6 @@ override NpgsqlTypes.NpgsqlRange.ToString() -> string! override NpgsqlTypes.NpgsqlTid.Equals(object? o) -> bool override NpgsqlTypes.NpgsqlTid.GetHashCode() -> int override NpgsqlTypes.NpgsqlTid.ToString() -> string! -override NpgsqlTypes.NpgsqlTimeSpan.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlTimeSpan.GetHashCode() -> int -override NpgsqlTypes.NpgsqlTimeSpan.ToString() -> string! override NpgsqlTypes.NpgsqlTsQuery.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlTsQuery.GetHashCode() -> int override NpgsqlTypes.NpgsqlTsQuery.ToString() -> string! @@ -1911,82 +1970,43 @@ override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.get -> bool override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.set -> void override sealed Npgsql.NpgsqlParameter.SourceVersion.get -> System.Data.DataRowVersion override sealed Npgsql.NpgsqlParameter.SourceVersion.set -> void -static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.get -> bool -static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.set -> void -static Npgsql.Logging.NpgsqlLogManager.Provider.get -> Npgsql.Logging.INpgsqlLoggingProvider! -static Npgsql.Logging.NpgsqlLogManager.Provider.set -> void -static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name) -> string! +static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name, System.Globalization.CultureInfo? culture = null) -> string! 
static Npgsql.NpgsqlCommandBuilder.DeriveParameters(Npgsql.NpgsqlCommand! command) -> void static Npgsql.NpgsqlConnection.ClearAllPools() -> void static Npgsql.NpgsqlConnection.ClearPool(Npgsql.NpgsqlConnection! connection) -> void static Npgsql.NpgsqlConnection.GlobalTypeMapper.get -> Npgsql.TypeMapping.INpgsqlTypeMapper! -static Npgsql.NpgsqlConnection.MapCompositeGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.NpgsqlConnection.MapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.NpgsqlConnection.UnmapCompositeGlobally(string! pgName, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.NpgsqlConnection.UnmapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +static Npgsql.NpgsqlDataSource.Create(Npgsql.NpgsqlConnectionStringBuilder! connectionStringBuilder) -> Npgsql.NpgsqlDataSource! +static Npgsql.NpgsqlDataSource.Create(string! connectionString) -> Npgsql.NpgsqlDataSource! +static Npgsql.NpgsqlLoggingConfiguration.InitializeLogging(Microsoft.Extensions.Logging.ILoggerFactory! loggerFactory, bool parameterLoggingEnabled = false) -> void +static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? 
slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.StartLogicalReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.Internal.LogicalReplicationSlot! slot, System.Threading.CancellationToken cancellationToken, NpgsqlTypes.NpgsqlLogSequenceNumber? walLocation = null, System.Collections.Generic.IEnumerable>? options = null, bool bypassingStream = false) -> System.Collections.Generic.IAsyncEnumerable! -static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! static Npgsql.Replication.PgOutputConnectionExtensions.StartReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.PgOutput.PgOutputReplicationSlot! slot, Npgsql.Replication.PgOutput.PgOutputReplicationOptions! options, System.Threading.CancellationToken cancellationToken, NpgsqlTypes.NpgsqlLogSequenceNumber? walLocation = null) -> System.Collections.Generic.IAsyncEnumerable! 
-static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! static Npgsql.Replication.TestDecodingConnectionExtensions.StartReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.TestDecoding.TestDecodingReplicationSlot! slot, System.Threading.CancellationToken cancellationToken, Npgsql.Replication.TestDecoding.TestDecodingOptions? options = null, NpgsqlTypes.NpgsqlLogSequenceNumber? walLocation = null) -> System.Collections.Generic.IAsyncEnumerable! static NpgsqlTypes.NpgsqlBox.operator !=(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool static NpgsqlTypes.NpgsqlBox.operator ==(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool -static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox +static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! 
+static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet static NpgsqlTypes.NpgsqlCircle.operator !=(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.operator ==(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool -static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle -static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.explicit operator System.DateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -static NpgsqlTypes.NpgsqlDate.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -static NpgsqlTypes.NpgsqlDate.Now.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator !=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDate date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate dateX, NpgsqlTypes.NpgsqlDate dateY) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlDate.operator <(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator <=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator ==(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator >(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool 
-static NpgsqlTypes.NpgsqlDate.operator >=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.Parse(string! str) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.ToDateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -static NpgsqlTypes.NpgsqlDate.ToDateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -static NpgsqlTypes.NpgsqlDate.Today.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.Tomorrow.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.TryParse(string! str, out NpgsqlTypes.NpgsqlDate date) -> bool -static NpgsqlTypes.NpgsqlDate.Yesterday.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDateTime.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDateTime npgsqlDateTime) -> System.DateTime -static NpgsqlTypes.NpgsqlDateTime.implicit operator NpgsqlTypes.NpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.Now.get -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator !=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlDateTime.operator <(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> 
bool -static NpgsqlTypes.NpgsqlDateTime.operator <=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator ==(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator >(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator >=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.Parse(string! str) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.ToNpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime +static NpgsqlTypes.NpgsqlCube.operator !=(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool +static NpgsqlTypes.NpgsqlCube.operator ==(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool static NpgsqlTypes.NpgsqlInet.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet -static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool -static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool -static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! -static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? 
ip) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPNetwork cidr) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlLSeg.operator !=(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool +static NpgsqlTypes.NpgsqlLSeg.operator ==(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool static NpgsqlTypes.NpgsqlLine.operator !=(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLine.operator ==(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool -static NpgsqlTypes.NpgsqlLine.Parse(string! s) -> NpgsqlTypes.NpgsqlLine +static NpgsqlTypes.NpgsqlLogSequenceNumber.Larger(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(System.ReadOnlySpan s) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(string! s) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.Smaller(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(System.ReadOnlySpan s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool +static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(string! 
s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator NpgsqlTypes.NpgsqlLogSequenceNumber(ulong value) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator ulong(NpgsqlTypes.NpgsqlLogSequenceNumber value) -> ulong -static NpgsqlTypes.NpgsqlLogSequenceNumber.Larger(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.operator !=(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.operator +(NpgsqlTypes.NpgsqlLogSequenceNumber lsn, double nbytes) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.operator -(NpgsqlTypes.NpgsqlLogSequenceNumber first, NpgsqlTypes.NpgsqlLogSequenceNumber second) -> ulong @@ -1996,77 +2016,29 @@ static NpgsqlTypes.NpgsqlLogSequenceNumber.operator <=(NpgsqlTypes.NpgsqlLogSequ static NpgsqlTypes.NpgsqlLogSequenceNumber.operator ==(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.operator >(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.operator >=(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool -static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(string! s) -> NpgsqlTypes.NpgsqlLogSequenceNumber -static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(System.ReadOnlySpan s) -> NpgsqlTypes.NpgsqlLogSequenceNumber -static NpgsqlTypes.NpgsqlLogSequenceNumber.Smaller(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber -static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(string! 
s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool -static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(System.ReadOnlySpan s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool -static NpgsqlTypes.NpgsqlLSeg.operator !=(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool -static NpgsqlTypes.NpgsqlLSeg.operator ==(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool -static NpgsqlTypes.NpgsqlLSeg.Parse(string! s) -> NpgsqlTypes.NpgsqlLSeg static NpgsqlTypes.NpgsqlPath.operator !=(NpgsqlTypes.NpgsqlPath x, NpgsqlTypes.NpgsqlPath y) -> bool static NpgsqlTypes.NpgsqlPath.operator ==(NpgsqlTypes.NpgsqlPath x, NpgsqlTypes.NpgsqlPath y) -> bool -static NpgsqlTypes.NpgsqlPath.Parse(string! s) -> NpgsqlTypes.NpgsqlPath static NpgsqlTypes.NpgsqlPoint.operator !=(NpgsqlTypes.NpgsqlPoint x, NpgsqlTypes.NpgsqlPoint y) -> bool static NpgsqlTypes.NpgsqlPoint.operator ==(NpgsqlTypes.NpgsqlPoint x, NpgsqlTypes.NpgsqlPoint y) -> bool -static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint static NpgsqlTypes.NpgsqlPolygon.operator !=(NpgsqlTypes.NpgsqlPolygon x, NpgsqlTypes.NpgsqlPolygon y) -> bool static NpgsqlTypes.NpgsqlPolygon.operator ==(NpgsqlTypes.NpgsqlPolygon x, NpgsqlTypes.NpgsqlPolygon y) -> bool -static NpgsqlTypes.NpgsqlPolygon.Parse(string! s) -> NpgsqlTypes.NpgsqlPolygon -static NpgsqlTypes.NpgsqlRange.operator !=(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool -static NpgsqlTypes.NpgsqlRange.operator ==(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool static NpgsqlTypes.NpgsqlRange.Parse(string! 
value) -> NpgsqlTypes.NpgsqlRange static NpgsqlTypes.NpgsqlRange.RangeTypeConverter.Register() -> void +static NpgsqlTypes.NpgsqlRange.operator !=(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool +static NpgsqlTypes.NpgsqlRange.operator ==(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool static NpgsqlTypes.NpgsqlTid.operator !=(NpgsqlTypes.NpgsqlTid left, NpgsqlTypes.NpgsqlTid right) -> bool static NpgsqlTypes.NpgsqlTid.operator ==(NpgsqlTypes.NpgsqlTid left, NpgsqlTypes.NpgsqlTid right) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.Compare(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> int -static NpgsqlTypes.NpgsqlTimeSpan.explicit operator System.TimeSpan(NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromDays(double days) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromHours(double hours) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMicroseconds(double micro) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMilliseconds(double milli) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMinutes(double minutes) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMonths(double months) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromSeconds(double seconds) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromTicks(long ticks) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.implicit operator NpgsqlTypes.NpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator !=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -static 
NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator <(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator <=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator ==(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator >(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator >=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.Parse(string! str) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.Plus(in NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.ToNpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.ToTimeSpan(in NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.TryParse(string! str, out NpgsqlTypes.NpgsqlTimeSpan result) -> bool +static NpgsqlTypes.NpgsqlTsQuery.Parse(string! value) -> NpgsqlTypes.NpgsqlTsQuery! static NpgsqlTypes.NpgsqlTsQuery.operator !=(NpgsqlTypes.NpgsqlTsQuery? left, NpgsqlTypes.NpgsqlTsQuery? right) -> bool static NpgsqlTypes.NpgsqlTsQuery.operator ==(NpgsqlTypes.NpgsqlTsQuery? left, NpgsqlTypes.NpgsqlTsQuery? right) -> bool -static NpgsqlTypes.NpgsqlTsQuery.Parse(string! value) -> NpgsqlTypes.NpgsqlTsQuery! 
-static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator !=(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool -static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool static NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.operator !=(NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos left, NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos right) -> bool static NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos left, NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos right) -> bool +static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator !=(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool +static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool static NpgsqlTypes.NpgsqlTsVector.Parse(string! value) -> NpgsqlTypes.NpgsqlTsVector! static readonly Npgsql.NpgsqlFactory.Instance -> Npgsql.NpgsqlFactory! 
-static readonly NpgsqlTypes.NpgsqlDate.Epoch -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.Era -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.Infinity -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.MaxCalculableValue -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.MinCalculableValue -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.NegativeInfinity -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDateTime.Epoch -> NpgsqlTypes.NpgsqlDateTime -static readonly NpgsqlTypes.NpgsqlDateTime.Era -> NpgsqlTypes.NpgsqlDateTime -static readonly NpgsqlTypes.NpgsqlDateTime.Infinity -> NpgsqlTypes.NpgsqlDateTime -static readonly NpgsqlTypes.NpgsqlDateTime.NegativeInfinity -> NpgsqlTypes.NpgsqlDateTime static readonly NpgsqlTypes.NpgsqlLogSequenceNumber.Invalid -> NpgsqlTypes.NpgsqlLogSequenceNumber static readonly NpgsqlTypes.NpgsqlRange.Empty -> NpgsqlTypes.NpgsqlRange -static readonly NpgsqlTypes.NpgsqlTimeSpan.MaxValue -> NpgsqlTypes.NpgsqlTimeSpan -static readonly NpgsqlTypes.NpgsqlTimeSpan.MinValue -> NpgsqlTypes.NpgsqlTimeSpan -static readonly NpgsqlTypes.NpgsqlTimeSpan.Zero -> NpgsqlTypes.NpgsqlTimeSpan -virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! \ No newline at end of file +static readonly NpgsqlTypes.NpgsqlTsVector.Empty -> NpgsqlTypes.NpgsqlTsVector! +virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! +virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! 
diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index b032a873c1..f91f353b64 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,367 +1,45 @@ -#nullable enable -Npgsql.NpgsqlBatch.EnableErrorBarriers.get -> bool -Npgsql.NpgsqlBatch.EnableErrorBarriers.set -> void -Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.get -> bool? -Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.set -> void -Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! -Npgsql.NpgsqlDataSource -Npgsql.NpgsqlDataSource.CreateBatch() -> Npgsql.NpgsqlBatch! -Npgsql.NpgsqlDataSource.CreateCommand(string? commandText = null) -> Npgsql.NpgsqlCommand! -Npgsql.NpgsqlDataSource.CreateConnection() -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlDataSource.OpenConnection() -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlDataSource.OpenConnectionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask -Npgsql.NpgsqlDataSource.Password.set -> void -Npgsql.NpgsqlDataSourceBuilder -Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void -Npgsql.NpgsqlDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! -Npgsql.NpgsqlDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! -Npgsql.NpgsqlDataSourceBuilder.ConnectionString.get -> string! -Npgsql.NpgsqlDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! -Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! -Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.set -> void -Npgsql.NpgsqlDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.NpgsqlDataSourceBuilder(string? connectionString = null) -> void -Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlDataSourceBuilder! 
-Npgsql.NpgsqlDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlLoggingConfiguration -Npgsql.NpgsqlMultiHostDataSource -Npgsql.NpgsqlMultiHostDataSource.ClearDatabaseStates() -> void -Npgsql.NpgsqlMultiHostDataSource.CreateConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlMultiHostDataSource.OpenConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlMultiHostDataSource.OpenConnectionAsync(Npgsql.TargetSessionAttributes targetSessionAttributes, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask -Npgsql.NpgsqlMultiHostDataSource.WithTargetSession(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlDataSource! -Npgsql.Replication.PgOutput.Messages.BeginPrepareMessage -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.TransactionCommitTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage 
-Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage.TransactionGid.get -> string! -Npgsql.Replication.PgOutput.Messages.PrepareMessage -Npgsql.Replication.PgOutput.Messages.PrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags -Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags -Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.TransactionPrepareTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.PreparedTransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionPrepareTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionRollbackTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.Flags.get -> 
Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.TwoPhase.get -> bool? -Npgsql.Replication.PhysicalReplicationConnection.ReadReplicationSlot(string! slotName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot! slot, System.Threading.CancellationToken cancellationToken) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? 
restartLsn = null, ulong? restartTimeline = null) -> void -Npgsql.Replication.PhysicalReplicationSlot.RestartLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber? -Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? -Npgsql.Schema.NpgsqlDbColumn.IsIdentity.get -> bool? -Npgsql.Schema.NpgsqlDbColumn.IsIdentity.set -> void -Npgsql.StatementType.Call = 11 -> Npgsql.StatementType -Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.set -> void -override Npgsql.NpgsqlCommand.CreateDbParameter() -> System.Data.Common.DbParameter! -override Npgsql.NpgsqlCommand.DbConnection.get -> System.Data.Common.DbConnection? -override Npgsql.NpgsqlCommand.DbConnection.set -> void -override Npgsql.NpgsqlCommand.DbParameterCollection.get -> System.Data.Common.DbParameterCollection! -override Npgsql.NpgsqlCommand.DbTransaction.get -> System.Data.Common.DbTransaction? -override Npgsql.NpgsqlCommand.DbTransaction.set -> void -override Npgsql.NpgsqlCommand.Dispose(bool disposing) -> void -override Npgsql.NpgsqlCommand.ExecuteDbDataReader(System.Data.CommandBehavior behavior) -> System.Data.Common.DbDataReader! -override Npgsql.NpgsqlCommand.ExecuteDbDataReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! -override Npgsql.NpgsqlDataSource.ConnectionString.get -> string! -override Npgsql.NpgsqlFactory.CreateDataSource(string! connectionString) -> System.Data.Common.DbDataSource! -static Npgsql.NpgsqlDataSource.Create(Npgsql.NpgsqlConnectionStringBuilder! connectionStringBuilder) -> Npgsql.NpgsqlDataSource! -static Npgsql.NpgsqlDataSource.Create(string! connectionString) -> Npgsql.NpgsqlDataSource! -static Npgsql.NpgsqlLoggingConfiguration.InitializeLogging(Microsoft.Extensions.Logging.ILoggerFactory! 
loggerFactory, bool parameterLoggingEnabled = false) -> void -static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! - -*REMOVED*Npgsql.NpgsqlConnection.Settings.get -> Npgsql.NpgsqlConnectionStringBuilder! -*REMOVED*abstract Npgsql.Logging.NpgsqlLogger.IsEnabled(Npgsql.Logging.NpgsqlLogLevel level) -> bool -*REMOVED*abstract Npgsql.Logging.NpgsqlLogger.Log(Npgsql.Logging.NpgsqlLogLevel level, int connectorId, string! msg, System.Exception? 
exception = null) -> void -*REMOVED*const NpgsqlTypes.NpgsqlDate.MaxYear = 5874897 -> int -*REMOVED*const NpgsqlTypes.NpgsqlDate.MinYear = -4714 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.DaysPerMonth = 30 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.HoursPerDay = 24 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.MonthsPerYear = 12 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerDay = 864000000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerHour = 36000000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMicrosecond = 10 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMillsecond = 10000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMinute = 600000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMonth = 25920000000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerSecond = 10000000 -> long -*REMOVED*Npgsql.NpgsqlConnection.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*Npgsql.NpgsqlConnection.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.get -> Npgsql.PhysicalOpenAsyncCallback? -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.set -> void -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenCallback.get -> Npgsql.PhysicalOpenCallback? -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenCallback.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute() -> void -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute(params string![]! synonyms) -> void -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute.Synonyms.get -> string![]! 
-*REMOVED*Npgsql.Logging.ConsoleLoggingProvider -*REMOVED*Npgsql.Logging.ConsoleLoggingProvider.ConsoleLoggingProvider(Npgsql.Logging.NpgsqlLogLevel minLevel = Npgsql.Logging.NpgsqlLogLevel.Info, bool printLevel = false, bool printConnectorId = false) -> void -*REMOVED*Npgsql.Logging.ConsoleLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -*REMOVED*Npgsql.Logging.INpgsqlLoggingProvider -*REMOVED*Npgsql.Logging.INpgsqlLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -*REMOVED*Npgsql.Logging.NpgsqlLogger -*REMOVED*Npgsql.Logging.NpgsqlLogger.NpgsqlLogger() -> void -*REMOVED*Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Debug = 2 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Error = 5 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Fatal = 6 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Info = 3 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Trace = 1 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Warn = 4 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogManager -*REMOVED*Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! -*REMOVED*Npgsql.NpgsqlDataReader.GetDate(int ordinal) -> NpgsqlTypes.NpgsqlDate -*REMOVED*Npgsql.NpgsqlDataReader.GetInterval(int ordinal) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*Npgsql.NpgsqlDataReader.GetTimeStamp(int ordinal) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*Npgsql.PhysicalOpenAsyncCallback -*REMOVED*Npgsql.PhysicalOpenCallback -*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName) -> void -*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? 
slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.AddDays(int days) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.AddMonths(int months) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.AddYears(int years) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.Compare(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.Compare(object? x, object? y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.CompareTo(NpgsqlTypes.NpgsqlDate other) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.CompareTo(object? 
o) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.Day.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.DayOfWeek.get -> System.DayOfWeek -*REMOVED*NpgsqlTypes.NpgsqlDate.DayOfYear.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.Equals(NpgsqlTypes.NpgsqlDate other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsFinite.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsLeapYear.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsNegativeInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.Month.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate() -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(int year, int month, int day) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(NpgsqlTypes.NpgsqlDate copyFrom) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateOnly date) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateTime dateTime) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.Year.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Add(in NpgsqlTypes.NpgsqlTimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Add(System.TimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddDays(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddHours(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddMilliseconds(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddMinutes(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddMonths(int value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddSeconds(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddTicks(long value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddYears(int value) 
-> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Compare(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Compare(object? x, object? y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.CompareTo(NpgsqlTypes.NpgsqlDateTime other) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.CompareTo(object? o) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Date.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Day.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.DayOfWeek.get -> System.DayOfWeek -*REMOVED*NpgsqlTypes.NpgsqlDateTime.DayOfYear.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Equals(NpgsqlTypes.NpgsqlDateTime other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Hour.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsFinite.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsLeapYear.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsNegativeInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Kind.get -> System.DateTimeKind -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Millisecond.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Minute.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Month.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Normalize() -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime() -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, int milliseconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks, System.DateTimeKind kind) -> void 
-*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date, System.TimeSpan time, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(System.DateTime dateTime) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Second.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Subtract(NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Ticks.get -> long -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Time.get -> System.TimeSpan -*REMOVED*NpgsqlTypes.NpgsqlDateTime.ToDateTime() -> System.DateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.ToLocalTime() -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.ToUniversalTime() -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Year.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Canonicalize() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.CompareTo(NpgsqlTypes.NpgsqlTimeSpan other) -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.CompareTo(object? 
other) -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Days.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Duration() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Equals(NpgsqlTypes.NpgsqlTimeSpan other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Hours.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.JustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.JustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.JustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Microseconds.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Milliseconds.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Minutes.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Months.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Negate() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan() -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds, int milliseconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, long ticks) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int years, int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(long ticks) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(System.TimeSpan timespan) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Seconds.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Ticks.get -> long -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Time.get -> System.TimeSpan 
-*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalDays.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalHours.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMicroseconds.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMilliseconds.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMinutes.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMonths.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalSeconds.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalTicks.get -> long -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.UnjustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.UnjustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.UnjustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*override NpgsqlTypes.NpgsqlDate.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlDate.GetHashCode() -> int -*REMOVED*override NpgsqlTypes.NpgsqlDate.ToString() -> string! -*REMOVED*override NpgsqlTypes.NpgsqlDateTime.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlDateTime.GetHashCode() -> int -*REMOVED*override NpgsqlTypes.NpgsqlDateTime.ToString() -> string! -*REMOVED*override NpgsqlTypes.NpgsqlTimeSpan.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlTimeSpan.GetHashCode() -> int -*REMOVED*override NpgsqlTypes.NpgsqlTimeSpan.ToString() -> string! -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.get -> bool -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.set -> void -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.Provider.get -> Npgsql.Logging.INpgsqlLoggingProvider! -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.Provider.set -> void -*REMOVED*static Npgsql.NpgsqlConnection.MapCompositeGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*static Npgsql.NpgsqlConnection.MapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> void -*REMOVED*static Npgsql.NpgsqlConnection.UnmapCompositeGlobally(string! pgName, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*static Npgsql.NpgsqlConnection.UnmapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
-*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator System.DateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -*REMOVED*static NpgsqlTypes.NpgsqlDate.Now.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator !=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDate date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate dateX, NpgsqlTypes.NpgsqlDate dateY) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator <(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator <=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator ==(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator >(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator >=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.Parse(string! 
str) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToDateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToDateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -*REMOVED*static NpgsqlTypes.NpgsqlDate.Today.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.Tomorrow.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.TryParse(string! str, out NpgsqlTypes.NpgsqlDate date) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.Yesterday.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDateTime npgsqlDateTime) -> System.DateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.implicit operator NpgsqlTypes.NpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.Now.get -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator !=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator <(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) 
-> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator <=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator ==(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator >(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator >=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.Parse(string! str) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.ToNpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.Compare(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> int -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.explicit operator System.TimeSpan(NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromDays(double days) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromHours(double hours) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMicroseconds(double micro) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMilliseconds(double milli) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMinutes(double minutes) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMonths(double months) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromSeconds(double seconds) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromTicks(long ticks) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.implicit operator NpgsqlTypes.NpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator !=(NpgsqlTypes.NpgsqlTimeSpan 
x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator <(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator <=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator ==(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator >(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator >=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.Parse(string! str) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.Plus(in NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.ToNpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.ToTimeSpan(in NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.TryParse(string! 
str, out NpgsqlTypes.NpgsqlTimeSpan result) -> bool -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.Epoch -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.Era -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.Infinity -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.MaxCalculableValue -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.MinCalculableValue -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.NegativeInfinity -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.Epoch -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.Era -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.Infinity -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.NegativeInfinity -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlTimeSpan.MaxValue -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static readonly NpgsqlTypes.NpgsqlTimeSpan.MinValue -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static readonly NpgsqlTypes.NpgsqlTimeSpan.Zero -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
+#nullable enable +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint +*REMOVED*Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemote(uint oid, string! path) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemoteAsync(uint oid, string! path, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Has64BitSupport.get -> bool +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemote(string! path, uint oid = 0) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemoteAsync(string! path, uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.get -> int +*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.set -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.NpgsqlLargeObjectManager(Npgsql.NpgsqlConnection! connection) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenRead(uint oid) -> Npgsql.NpgsqlLargeObjectStream! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
+*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWrite(uint oid) -> Npgsql.NpgsqlLargeObjectStream! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWriteAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Unlink(uint oid) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.UnlinkAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream +*REMOVED*Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool +*REMOVED*Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanTimeout.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanWrite.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Close() -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Flush() -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Length.get -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.get -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.set -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Read(byte[]! 
buffer, int offset, int count) -> int +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin origin) -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +NpgsqlTypes.NpgsqlRange.LowerBound.get -> T? +NpgsqlTypes.NpgsqlRange.UpperBound.get -> T? +NpgsqlTypes.NpgsqlRange.NpgsqlRange(T? lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T? upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) -> void diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs index dcc58d9fff..ef066e1158 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs @@ -1,7 +1,6 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; -using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading; @@ -62,67 +61,58 @@ public static Task CreateLogicalReplicationSlot( CancellationToken cancellationToken = default) { connection.CheckDisposed(); + ArgumentNullException.ThrowIfNull(slotName); + ArgumentNullException.ThrowIfNull(outputPlugin); - using var _ = NoSynchronizationContextScope.Enter(); - return CreateLogicalReplicationSlotCore(); + cancellationToken.ThrowIfCancellationRequested(); - Task CreateLogicalReplicationSlotCore() + var builder = new 
StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); + if (isTemporary) + builder.Append(" TEMPORARY"); + builder.Append(" LOGICAL ").Append(outputPlugin); + if (connection.PostgreSqlVersion.Major >= 15 && (slotSnapshotInitMode.HasValue || twoPhase)) { - if (slotName is null) - throw new ArgumentNullException(nameof(slotName)); - if (outputPlugin is null) - throw new ArgumentNullException(nameof(outputPlugin)); - - cancellationToken.ThrowIfCancellationRequested(); - - var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); - if (isTemporary) - builder.Append(" TEMPORARY"); - builder.Append(" LOGICAL ").Append(outputPlugin); - if (connection.PostgreSqlVersion.Major >= 15 && (slotSnapshotInitMode.HasValue || twoPhase)) - { - builder.Append('('); - if (slotSnapshotInitMode.HasValue) - { - builder.Append(slotSnapshotInitMode switch - { - LogicalSlotSnapshotInitMode.Export => "SNAPSHOT 'export'", - LogicalSlotSnapshotInitMode.Use => "SNAPSHOT 'use'", - LogicalSlotSnapshotInitMode.NoExport => "SNAPSHOT 'nothing'", - _ => throw new ArgumentOutOfRangeException(nameof(slotSnapshotInitMode), - slotSnapshotInitMode, - $"Unexpected value {slotSnapshotInitMode} for argument {nameof(slotSnapshotInitMode)}.") - }); - if (twoPhase) - builder.Append(",TWO_PHASE"); - } - else - builder.Append("TWO_PHASE"); - builder.Append(')'); - } - else + builder.Append('('); + if (slotSnapshotInitMode.HasValue) { builder.Append(slotSnapshotInitMode switch { - // EXPORT_SNAPSHOT is the default since it has been introduced. - // We don't set it unless it is explicitly requested so that older backends can digest the query too. 
- null => string.Empty, - LogicalSlotSnapshotInitMode.Export => " EXPORT_SNAPSHOT", - LogicalSlotSnapshotInitMode.Use => " USE_SNAPSHOT", - LogicalSlotSnapshotInitMode.NoExport => " NOEXPORT_SNAPSHOT", + LogicalSlotSnapshotInitMode.Export => "SNAPSHOT 'export'", + LogicalSlotSnapshotInitMode.Use => "SNAPSHOT 'use'", + LogicalSlotSnapshotInitMode.NoExport => "SNAPSHOT 'nothing'", _ => throw new ArgumentOutOfRangeException(nameof(slotSnapshotInitMode), slotSnapshotInitMode, $"Unexpected value {slotSnapshotInitMode} for argument {nameof(slotSnapshotInitMode)}.") }); if (twoPhase) - builder.Append(" TWO_PHASE"); + builder.Append(",TWO_PHASE"); } - var command = builder.ToString(); + else + builder.Append("TWO_PHASE"); + builder.Append(')'); + } + else + { + builder.Append(slotSnapshotInitMode switch + { + // EXPORT_SNAPSHOT is the default since it has been introduced. + // We don't set it unless it is explicitly requested so that older backends can digest the query too. + null => string.Empty, + LogicalSlotSnapshotInitMode.Export => " EXPORT_SNAPSHOT", + LogicalSlotSnapshotInitMode.Use => " USE_SNAPSHOT", + LogicalSlotSnapshotInitMode.NoExport => " NOEXPORT_SNAPSHOT", + _ => throw new ArgumentOutOfRangeException(nameof(slotSnapshotInitMode), + slotSnapshotInitMode, + $"Unexpected value {slotSnapshotInitMode} for argument {nameof(slotSnapshotInitMode)}.") + }); + if (twoPhase) + builder.Append(" TWO_PHASE"); + } + var command = builder.ToString(); - LogMessages.CreatingReplicationSlot(connection.ReplicationLogger, slotName, command, connection.Connector.Id); + LogMessages.CreatingReplicationSlot(connection.ReplicationLogger, slotName, command, connection.Connector.Id); - return connection.CreateReplicationSlot(command, cancellationToken); - } + return connection.CreateReplicationSlot(command, cancellationToken); } /// @@ -150,9 +140,9 @@ public static IAsyncEnumerable StartLogicalReplication( IEnumerable>? 
options = null, bool bypassingStream = false) { - using (NoSynchronizationContextScope.Enter()) - return StartLogicalReplicationInternal(connection, slot, cancellationToken, walLocation, options, bypassingStream); + return StartLogicalReplicationInternal(connection, slot, cancellationToken, walLocation, options, bypassingStream); + // Local method to avoid having to add the EnumeratorCancellation attribute to the public signature. static async IAsyncEnumerable StartLogicalReplicationInternal( LogicalReplicationConnection connection, LogicalReplicationSlot slot, @@ -166,12 +156,18 @@ static async IAsyncEnumerable StartLogicalReplicationInternal( .Append(" LOGICAL ") .Append(walLocation ?? slot.ConsistentPoint); - if (options?.Any() == true) + var opts = new List>(options ?? Array.Empty>()); + if (opts.Count > 0) { - builder - .Append(" (") - .Append(string.Join(", ", options.Select(kv => @$"""{kv.Key}""{(kv.Value is null ? "" : $" '{kv.Value}'")}"))) - .Append(')'); + builder.Append(" ("); + var stringOptions = new string[opts.Count]; + for (var i = 0; i < opts.Count; i++) + { + var kv = opts[i]; + stringOptions[i] = @$"""{kv.Key}""{(kv.Value is null ? 
"" : $" '{kv.Value}'")}"; + } + builder.Append(string.Join(", ", stringOptions)); + builder.Append(')'); } var command = builder.ToString(); @@ -179,8 +175,8 @@ static async IAsyncEnumerable StartLogicalReplicationInternal( LogMessages.StartingLogicalReplication(connection.ReplicationLogger, slot.Name, command, connection.Connector.Id); var enumerator = connection.StartReplicationInternalWrapper(command, bypassingStream, cancellationToken); - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync().ConfigureAwait(false)) yield return enumerator.Current; } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs b/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs index 5edfa5d823..dddb0d77a4 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.Internal; diff --git a/src/Npgsql/Replication/LogicalReplicationConnection.cs b/src/Npgsql/Replication/LogicalReplicationConnection.cs index 7172b8a060..8a71a0c511 100644 --- a/src/Npgsql/Replication/LogicalReplicationConnection.cs +++ b/src/Npgsql/Replication/LogicalReplicationConnection.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Represents a logical replication connection to a PostgreSQL server. diff --git a/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs b/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs index 3e71c7ca7b..9c287d431b 100644 --- a/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs +++ b/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Decides what to do with the snapshot created during logical slot initialization. 
diff --git a/src/Npgsql/Replication/PgDateTime.cs b/src/Npgsql/Replication/PgDateTime.cs new file mode 100644 index 0000000000..aa68bda7f6 --- /dev/null +++ b/src/Npgsql/Replication/PgDateTime.cs @@ -0,0 +1,16 @@ +using System; + +namespace Npgsql.Replication; + +static class PgDateTime +{ + const long PostgresTimestampOffsetTicks = 630822816000000000L; + + public static DateTime DecodeTimestamp(long value, DateTimeKind kind) + => new(value * 10 + PostgresTimestampOffsetTicks, kind); + + public static long EncodeTimestamp(DateTime value) + // Rounding here would cause problems because we would round up DateTime.MaxValue + // which would make it impossible to retrieve it back from the database, so we just drop the additional precision + => (value.Ticks - PostgresTimestampOffsetTicks) / 10; +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs index 6fbfcb2c37..e64c6a6275 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs index 288bff1e03..74840af38a 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs index f2f0b16525..38cc1f1046 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; 
using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs index 7ed189a981..a98284644d 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs index 8a9a34741d..6fd36d7ea0 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs index eaacd1ab0c..bbca233e6b 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; @@ -13,12 +13,6 @@ public abstract class DeleteMessage : TransactionalMessage /// public RelationMessage Relation { get; private set; } = null!; - /// - /// ID of the relation corresponding to the ID in the relation message. 
- /// - [Obsolete("Use Relation.RelationId")] - public uint RelationId => Relation.RelationId; - private protected DeleteMessage() {} private protected DeleteMessage Populate( @@ -31,4 +25,4 @@ private protected DeleteMessage Populate( return this; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs index 933b50ac68..cb5dec77ec 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs @@ -1,6 +1,5 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs index 7da8f77c68..572095d615 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.Generic; +using System; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs index 14f31b1672..26bf38c83c 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.Generic; +using System; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs index d0f67841e9..a11a21de38 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs +++ 
b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs @@ -1,6 +1,5 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; @@ -19,12 +18,6 @@ public sealed class InsertMessage : TransactionalMessage /// public RelationMessage Relation { get; private set; } = null!; - /// - /// ID of the relation corresponding to the ID in the relation message. - /// - [Obsolete("Use Relation.RelationId")] - public uint RelationId => Relation.RelationId; - /// /// Columns representing the new row. /// @@ -47,4 +40,4 @@ internal InsertMessage Populate( internal Task Consume(CancellationToken cancellationToken) => _tupleEnumerable.Consume(cancellationToken); -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs index 9905d44753..5d589f8526 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs @@ -1,6 +1,5 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs index 0add6103e6..d49a908dbc 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using NpgsqlTypes; diff --git a/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs index 8356cc997a..a9be84ea26 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs @@ 
-1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs index b93e27fa3c..b2ccdc73c3 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs @@ -1,6 +1,3 @@ -using NpgsqlTypes; -using System; - namespace Npgsql.Replication.PgOutput.Messages; /// diff --git a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs index 16cd8fa36b..0f01b4c4d9 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs index 0eda1b18d3..98f4e208f1 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs index 04f98be920..28885629c3 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs 
b/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs index f9be4a1eeb..aa17bd766b 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs @@ -1,7 +1,6 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; -using System.Collections.Immutable; using Npgsql.BackendMessages; namespace Npgsql.Replication.PgOutput.Messages; @@ -136,4 +135,4 @@ public enum ReplicaIdentitySetting : byte /// IndexWithIndIsReplIdent = (byte)'i' } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs b/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs index 4692e4e6c4..c7806a8b1c 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput.Messages; +namespace Npgsql.Replication.PgOutput.Messages; /// /// Represents a column in a Logical Replication Protocol relation message diff --git a/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs index 681e7af4b6..c3f652d5ee 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs index 23fc2c5a24..4c9a24b06c 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs @@ -1,12 +1,12 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; /// 
-/// Logical Replication Protocol stream abort message +/// Logical Replication Protocol stream abort message for Logical Streaming Replication Protocol versions 2-3 /// -public sealed class StreamAbortMessage : TransactionControlMessage +public class StreamAbortMessage : TransactionControlMessage { /// /// Xid of the subtransaction (will be same as xid of the transaction for top-level transactions). @@ -22,4 +22,31 @@ internal StreamAbortMessage Populate(NpgsqlLogSequenceNumber walStart, NpgsqlLog SubtransactionXid = subtransactionXid; return this; } -} \ No newline at end of file +} + +/// +/// Logical Replication Protocol stream abort message for Logical Streaming Replication Protocol versions 4+ +/// +public sealed class ParallelStreamAbortMessage : StreamAbortMessage +{ + /// + /// The LSN of the abort. + /// + public NpgsqlLogSequenceNumber AbortLsn { get; private set; } + + /// + /// Abort timestamp of the transaction. + /// + public DateTime AbortTimestamp { get; private set; } + + internal ParallelStreamAbortMessage() {} + + internal ParallelStreamAbortMessage Populate(NpgsqlLogSequenceNumber walStart, NpgsqlLogSequenceNumber walEnd, DateTime serverClock, + uint transactionXid, uint subtransactionXid, NpgsqlLogSequenceNumber abortLsn, DateTime abortTimestamp) + { + base.Populate(walStart, walEnd, serverClock, transactionXid, subtransactionXid); + AbortLsn = abortLsn; + AbortTimestamp = abortTimestamp; + return this; + } +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs index ae6aacc584..7dba3c3027 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs 
b/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs index 4947e0d046..d54dedf9f2 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs index 4b0ace1cf7..c6aeb86276 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs index f3fd165a1e..673ba5a6d5 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs index 4c3c901b2f..6c039e5475 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using NpgsqlTypes; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs index d5aac683a2..307e45a355 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs +++ 
b/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using NpgsqlTypes; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs index 47837f93f3..b2ae368aa6 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs index 5e188de4fe..25747c676a 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs index 39d324b6f2..a891f66ad0 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; using System.Diagnostics; @@ -18,12 +18,6 @@ public abstract class UpdateMessage : TransactionalMessage /// public RelationMessage Relation { get; private set; } = null!; - /// - /// ID of the relation corresponding to the ID in the relation message. - /// - [Obsolete("Use Relation.RelationId")] - public uint RelationId => Relation.RelationId; - /// /// Columns representing the new row. 
/// @@ -55,13 +49,13 @@ public override async IAsyncEnumerator GetAsyncEnumerator(Canc // This will throw if we're already reading (or consumed) the second row var enumerator = base.GetAsyncEnumerator(cancellationToken); - await _oldRowTupleEnumerable.Consume(cancellationToken); - await ReadBuffer.EnsureAsync(3); + await _oldRowTupleEnumerable.Consume(cancellationToken).ConfigureAwait(false); + await ReadBuffer.EnsureAsync(3).ConfigureAwait(false); var tupleType = (TupleType)ReadBuffer.ReadByte(); Debug.Assert(tupleType == TupleType.NewTuple); _ = ReadBuffer.ReadUInt16(); // numColumns, - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync().ConfigureAwait(false)) yield return enumerator.Current; } @@ -69,13 +63,13 @@ public override async IAsyncEnumerator GetAsyncEnumerator(Canc { if (State == RowState.NotRead) { - await _oldRowTupleEnumerable.Consume(cancellationToken); - await ReadBuffer.EnsureAsync(3); + await _oldRowTupleEnumerable.Consume(cancellationToken).ConfigureAwait(false); + await ReadBuffer.EnsureAsync(3).ConfigureAwait(false); var tupleType = (TupleType)ReadBuffer.ReadByte(); Debug.Assert(tupleType == TupleType.NewTuple); _ = ReadBuffer.ReadUInt16(); // numColumns, } - await base.Consume(cancellationToken); + await base.Consume(cancellationToken).ConfigureAwait(false); } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs index 76d983a6ee..d200c780a1 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs @@ -5,16 +5,15 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; using Npgsql.Replication.Internal; using Npgsql.Replication.PgOutput.Messages; -using Npgsql.Util; using NpgsqlTypes; namespace Npgsql.Replication.PgOutput; sealed class 
PgOutputAsyncEnumerable : IAsyncEnumerable { + readonly PgOutputProtocolVersion _protocolVersion; readonly LogicalReplicationConnection _connection; readonly PgOutputReplicationSlot _slot; readonly PgOutputReplicationOptions _options; @@ -40,17 +39,20 @@ sealed class PgOutputAsyncEnumerable : IAsyncEnumerable _truncateMessageRelations = new(); // V2 - readonly StreamStartMessage _streamStartMessage = new(); - readonly StreamStopMessage _streamStopMessage = new(); - readonly StreamCommitMessage _streamCommitMessage = new(); - readonly StreamAbortMessage _streamAbortMessage = new(); + readonly StreamStartMessage _streamStartMessage = null!; + readonly StreamStopMessage _streamStopMessage = null!; + readonly StreamCommitMessage _streamCommitMessage = null!; + readonly StreamAbortMessage _streamAbortMessage = null!; // V3 - readonly BeginPrepareMessage _beginPrepareMessage = new(); - readonly PrepareMessage _prepareMessage = new(); - readonly CommitPreparedMessage _commitPreparedMessage = new(); - readonly RollbackPreparedMessage _rollbackPreparedMessage = new(); - readonly StreamPrepareMessage _streamPrepareMessage = new(); + readonly BeginPrepareMessage _beginPrepareMessage = null!; + readonly PrepareMessage _prepareMessage = null!; + readonly CommitPreparedMessage _commitPreparedMessage = null!; + readonly RollbackPreparedMessage _rollbackPreparedMessage = null!; + readonly StreamPrepareMessage _streamPrepareMessage = null!; + + // V4 + readonly ParallelStreamAbortMessage _parallelStreamAbortMessage = null!; #endregion @@ -61,12 +63,38 @@ internal PgOutputAsyncEnumerable( CancellationToken cancellationToken, NpgsqlLogSequenceNumber? 
walLocation = null) { + _protocolVersion = options.ProtocolVersion; _connection = connection; _slot = slot; _options = options; _baseCancellationToken = cancellationToken; _walLocation = walLocation; + + if (_protocolVersion >= PgOutputProtocolVersion.V2) + { + _streamStartMessage = new(); + _streamStopMessage = new(); + _streamCommitMessage = new(); + } + if (_protocolVersion >= PgOutputProtocolVersion.V3) + { + _beginPrepareMessage = new(); + _prepareMessage = new(); + _commitPreparedMessage = new(); + _rollbackPreparedMessage = new(); + _streamPrepareMessage = new(); + } + + if (_protocolVersion >= PgOutputProtocolVersion.V4) + { + _parallelStreamAbortMessage = new(); + } + else if (_protocolVersion >= PgOutputProtocolVersion.V2) + { + _streamAbortMessage = new(); + } + var connector = _connection.Connector; _insertMessage = new(connector); _defaultUpdateMessage = new(connector); @@ -77,13 +105,7 @@ internal PgOutputAsyncEnumerable( } public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - { - return StartReplicationInternal( - CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token); - } - } + => StartReplicationInternal(CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token); async IAsyncEnumerator StartReplicationInternal(CancellationToken cancellationToken) { @@ -91,20 +113,20 @@ async IAsyncEnumerator StartReplicationInternal(Canc _slot, cancellationToken, _walLocation, _options.GetOptionPairs(), bypassingStream: true); var buf = _connection.Connector!.ReadBuffer; var inStreamingTransaction = false; - var formatCode = _options.Binary ?? false ? FormatCode.Binary : FormatCode.Text; + var dataFormat = _options.Binary ?? false ? 
DataFormat.Binary : DataFormat.Text; - await foreach (var xLogData in stream.WithCancellation(cancellationToken)) + await foreach (var xLogData in stream.WithCancellation(cancellationToken).ConfigureAwait(false)) { - await buf.EnsureAsync(1); + await buf.EnsureAsync(1).ConfigureAwait(false); var messageCode = (BackendReplicationMessageCode)buf.ReadByte(); switch (messageCode) { case BackendReplicationMessageCode.Begin: { - await buf.EnsureAsync(20); + await buf.EnsureAsync(20).ConfigureAwait(false); yield return _beginMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionFinalLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32()); continue; } @@ -113,43 +135,44 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(14); + await buf.EnsureAsync(14).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(10); + await buf.EnsureAsync(10).ConfigureAwait(false); transactionXid = null; } var flags = buf.ReadByte(); var messageLsn = new NpgsqlLogSequenceNumber(buf.ReadUInt64()); - var prefix = await buf.ReadNullTerminatedString(async: true, cancellationToken); - await buf.EnsureAsync(4); + var prefix = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + await buf.EnsureAsync(4).ConfigureAwait(false); var length = buf.ReadUInt32(); var data = (NpgsqlReadBuffer.ColumnStream)xLogData.Data; - data.Init(checked((int)length), false); + data.Init(checked((int)length), canSeek: false, commandScoped: false); yield return _logicalDecodingMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, flags, messageLsn, prefix, data); + await 
data.DisposeAsync().ConfigureAwait(false); continue; } case BackendReplicationMessageCode.Commit: { - await buf.EnsureAsync(25); + await buf.EnsureAsync(25).ConfigureAwait(false); yield return _commitMessage.Populate( xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, (CommitMessage.CommitFlags)buf.ReadByte(), commitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), transactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); continue; } case BackendReplicationMessageCode.Origin: { - await buf.EnsureAsync(9); + await buf.EnsureAsync(9).ConfigureAwait(false); yield return _originMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, originCommitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - originName: await buf.ReadNullTerminatedString(async: true, cancellationToken)); + originName: await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false)); continue; } case BackendReplicationMessageCode.Relation: @@ -157,19 +180,19 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(10); + await buf.EnsureAsync(10).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(6); + await buf.EnsureAsync(6).ConfigureAwait(false); transactionXid = null; } var relationId = buf.ReadUInt32(); - var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken); - var relationName = await buf.ReadNullTerminatedString(async: true, cancellationToken); - await buf.EnsureAsync(3); + var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + var relationName = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + await buf.EnsureAsync(3).ConfigureAwait(false); var relationReplicaIdentitySetting = (RelationMessage.ReplicaIdentitySetting)buf.ReadByte(); var numColumns = buf.ReadUInt16(); @@ -183,17 +206,17 @@ async IAsyncEnumerator StartReplicationInternal(Canc columns.Count = numColumns; for (var i = 0; i < numColumns; i++) { - await buf.EnsureAsync(2); + await buf.EnsureAsync(2).ConfigureAwait(false); var flags = (RelationMessage.Column.ColumnFlags)buf.ReadByte(); - var columnName = await buf.ReadNullTerminatedString(async: true, cancellationToken); - await buf.EnsureAsync(8); + var columnName = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + await buf.EnsureAsync(8).ConfigureAwait(false); var dateTypeId = buf.ReadUInt32(); var typeModifier = buf.ReadInt32(); columns[i] = new RelationMessage.Column(flags, columnName, dateTypeId, typeModifier); } msg.RowDescription = RowDescriptionMessage.CreateForReplication( - _connection.Connector.TypeMapper, relationId, formatCode, columns); + _connection.Connector.SerializerOptions, relationId, dataFormat, columns); yield return msg; continue; @@ -203,18 +226,18 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(9); + await buf.EnsureAsync(9).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(5); + await buf.EnsureAsync(5).ConfigureAwait(false); transactionXid = null; } var typeId = buf.ReadUInt32(); - var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken); - var name = await buf.ReadNullTerminatedString(async: true, cancellationToken); + var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + var name = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); yield return _typeMessage.Populate( xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, typeId, ns, name); continue; @@ -224,12 +247,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(11); + await buf.EnsureAsync(11).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(7); + await buf.EnsureAsync(7).ConfigureAwait(false); transactionXid = null; } @@ -248,7 +271,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc yield return _insertMessage.Populate( xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _insertMessage.Consume(cancellationToken); + await _insertMessage.Consume(cancellationToken).ConfigureAwait(false); continue; } @@ -257,12 +280,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(11); + await buf.EnsureAsync(11).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(7); + await buf.EnsureAsync(7).ConfigureAwait(false); transactionXid = null; } @@ -283,17 +306,17 @@ async IAsyncEnumerator StartReplicationInternal(Canc case TupleType.Key: yield return _indexUpdateMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _indexUpdateMessage.Consume(cancellationToken); + await _indexUpdateMessage.Consume(cancellationToken).ConfigureAwait(false); continue; case TupleType.OldTuple: yield return _fullUpdateMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _fullUpdateMessage.Consume(cancellationToken); + await _fullUpdateMessage.Consume(cancellationToken).ConfigureAwait(false); continue; case TupleType.NewTuple: yield return _defaultUpdateMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _defaultUpdateMessage.Consume(cancellationToken); + await _defaultUpdateMessage.Consume(cancellationToken).ConfigureAwait(false); continue; default: throw new NotSupportedException($"The tuple type '{tupleType}' is not supported."); @@ -304,12 +327,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(11); + await buf.EnsureAsync(11).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(7); + await buf.EnsureAsync(7).ConfigureAwait(false); transactionXid = null; } @@ -330,12 +353,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc case TupleType.Key: yield return _keyDeleteMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _keyDeleteMessage.Consume(cancellationToken); + await _keyDeleteMessage.Consume(cancellationToken).ConfigureAwait(false); continue; case TupleType.OldTuple: yield return _fullDeleteMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _fullDeleteMessage.Consume(cancellationToken); + await _fullDeleteMessage.Consume(cancellationToken).ConfigureAwait(false); continue; default: throw new NotSupportedException($"The tuple type '{tupleDataType}' is not supported."); @@ -346,12 +369,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(9); + await buf.EnsureAsync(9).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(5); + await buf.EnsureAsync(5).ConfigureAwait(false); transactionXid = null; } @@ -361,7 +384,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc _truncateMessageRelations.Count = numRels; for (var i = 0; i < numRels; i++) { - await buf.EnsureAsync(4); + await buf.EnsureAsync(4).ConfigureAwait(false); var relationId = buf.ReadUInt32(); if (!_relations.TryGetValue(relationId, out var relation)) @@ -379,7 +402,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamStart: { - await buf.EnsureAsync(5); + await buf.EnsureAsync(5).ConfigureAwait(false); inStreamingTransaction = true; yield return _streamStartMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid: buf.ReadUInt32(), streamSegmentIndicator: buf.ReadByte()); @@ -393,76 +416,90 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamCommit: { - await buf.EnsureAsync(29); + await buf.EnsureAsync(29).ConfigureAwait(false); yield return _streamCommitMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid: buf.ReadUInt32(), flags: buf.ReadByte(), commitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), transactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); continue; } case BackendReplicationMessageCode.StreamAbort: { - await buf.EnsureAsync(8); - yield return _streamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, - transactionXid: buf.ReadUInt32(), subtransactionXid: buf.ReadUInt32()); + if (_protocolVersion >= 
PgOutputProtocolVersion.V4) + { + await buf.EnsureAsync(24).ConfigureAwait(false); + yield return _parallelStreamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, + transactionXid: buf.ReadUInt32(), + subtransactionXid: buf.ReadUInt32(), + abortLsn: new(buf.ReadUInt64()), + abortTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); + + } + else + { + await buf.EnsureAsync(8).ConfigureAwait(false); + yield return _streamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, + transactionXid: buf.ReadUInt32(), subtransactionXid: buf.ReadUInt32()); + + } continue; } case BackendReplicationMessageCode.BeginPrepare: { - await buf.EnsureAsync(29); + await buf.EnsureAsync(29).ConfigureAwait(false); yield return _beginPrepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; } case BackendReplicationMessageCode.Prepare: { - await buf.EnsureAsync(30); + await buf.EnsureAsync(30).ConfigureAwait(false); yield return _prepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (PrepareMessage.PrepareFlags)buf.ReadByte(), prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; } case 
BackendReplicationMessageCode.CommitPrepared: { - await buf.EnsureAsync(30); + await buf.EnsureAsync(30).ConfigureAwait(false); yield return _commitPreparedMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (CommitPreparedMessage.CommitPreparedFlags)buf.ReadByte(), commitPreparedLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), commitPreparedEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; } case BackendReplicationMessageCode.RollbackPrepared: { - await buf.EnsureAsync(38); + await buf.EnsureAsync(38).ConfigureAwait(false); yield return _rollbackPreparedMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (RollbackPreparedMessage.RollbackPreparedFlags)buf.ReadByte(), preparedTransactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), rollbackPreparedEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), - transactionRollbackTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionRollbackTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; } case BackendReplicationMessageCode.StreamPrepare: { - await buf.EnsureAsync(30); + await buf.EnsureAsync(30).ConfigureAwait(false); yield return _streamPrepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (StreamPrepareMessage.StreamPrepareFlags)buf.ReadByte(), prepareLsn: new 
NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs b/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs index c67af16d58..8cd5c6f3d1 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using NpgsqlTypes; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs b/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs new file mode 100644 index 0000000000..c575a9c85e --- /dev/null +++ b/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs @@ -0,0 +1,30 @@ +namespace Npgsql.Replication.PgOutput; + +/// +/// The Logical Streaming Replication Protocol version. +/// +public enum PgOutputProtocolVersion : ulong +{ + /// + /// Version 1 is supported for server version 10 and above. + /// + V1 = 1UL, + + /// + /// Version 2 is supported only for server version 14 and above, and it allows + /// streaming of large in-progress transactions. + /// + V2 = 2UL, + + /// + /// Version 3 is supported only for server version 15 and above, and it allows + /// streaming of two-phase commits. + /// + V3 = 3UL, + + /// + /// Version 4 is supported only for server version 16 and above, and it allows + /// streams of large in-progress transactions to be applied in parallel. 
+ /// + V4 = 4UL +} diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs index 93039fdf25..94df40222c 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Globalization; @@ -9,18 +9,61 @@ namespace Npgsql.Replication.PgOutput; /// public class PgOutputReplicationOptions : IEquatable { + /// + /// Creates a new instance of . + /// + /// The publication names to include into the stream + /// The version of the logical streaming replication protocol. + /// Passing in unsupported protocol version numbers may lead to runtime errors. + /// Send values in binary representation + /// Enable streaming of in-progress transactions. + /// Setting this to sets + /// to . + /// Write logical decoding messages into the replication stream + /// Enable streaming of prepared transactions + [Obsolete("Please switch to the overloads that take PgOutputProtocolVersion and PgOutputStreamingMode values instead.")] + public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, + bool? messages = null, bool? twoPhase = null) + : this([publicationName ?? throw new ArgumentNullException(nameof(publicationName))], (PgOutputProtocolVersion)protocolVersion, + binary, streaming.HasValue ? streaming.Value ? PgOutputStreamingMode.On : PgOutputStreamingMode.Off : null, messages, twoPhase) + { + } + /// /// Creates a new instance of . 
/// /// The publication names to include into the stream /// The version of the logical streaming replication protocol /// Send values in binary representation - /// Enable streaming of in-progress transactions + /// Enable streaming of in-progress transactions + /// Write logical decoding messages into the replication stream + /// Enable streaming of prepared transactions + public PgOutputReplicationOptions(string publicationName, PgOutputProtocolVersion protocolVersion, bool? binary = null, + PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) + : this([publicationName ?? throw new ArgumentNullException(nameof(publicationName))], protocolVersion, binary, streamingMode, + messages, twoPhase) + { + } + + /// + /// Creates a new instance of . + /// + /// The publication names to include into the stream + /// The version of the logical streaming replication protocol. + /// Passing in unsupported protocol version numbers may lead to runtime errors. + /// Send values in binary representation + /// Enable streaming of in-progress transactions. + /// Setting this to sets + /// to . /// Write logical decoding messages into the replication stream /// Enable streaming of prepared transactions - public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) - : this(new List { publicationName ?? throw new ArgumentNullException(nameof(publicationName)) }, protocolVersion, binary, streaming, messages, twoPhase) - { } + [Obsolete("Please switch to the overloads that take PgOutputProtocolVersion and PgOutputStreamingMode values instead.")] + public PgOutputReplicationOptions(IEnumerable publicationNames, ulong protocolVersion, bool? binary = null, + bool? streaming = null, bool? messages = null, bool? twoPhase = null) + : this(publicationNames, (PgOutputProtocolVersion)protocolVersion, binary, + streaming.HasValue ? streaming.Value ? 
PgOutputStreamingMode.On : PgOutputStreamingMode.Off : null, messages, twoPhase) + { + } /// /// Creates a new instance of . @@ -28,10 +71,11 @@ public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, /// The publication names to include into the stream /// The version of the logical streaming replication protocol /// Send values in binary representation - /// Enable streaming of in-progress transactions + /// Enable streaming of in-progress transactions /// Write logical decoding messages into the replication stream /// Enable streaming of prepared transactions - public PgOutputReplicationOptions(IEnumerable publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) + public PgOutputReplicationOptions(IEnumerable publicationNames, PgOutputProtocolVersion protocolVersion, bool? binary = null, + PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) { var publicationNamesList = new List(publicationNames); if (publicationNamesList.Count < 1) @@ -46,7 +90,7 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr PublicationNames = publicationNamesList; ProtocolVersion = protocolVersion; Binary = binary; - Streaming = streaming; + StreamingMode = streamingMode; Messages = messages; TwoPhase = twoPhase; } @@ -54,7 +98,7 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr /// /// The version of the Logical Streaming Replication Protocol /// - public ulong ProtocolVersion { get; } + public PgOutputProtocolVersion ProtocolVersion { get; } /// /// The publication names to stream @@ -74,10 +118,12 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr /// Enable streaming of in-progress transactions /// /// - /// This works as of logical streaming replication protocol version 2 (PostgreSQL 14+) + /// works as of logical streaming replication protocol version 2 (PostgreSQL 14+), + 
/// works as of logical streaming replication protocol version 4 (PostgreSQL 16+), /// // See: https://github.com/postgres/postgres/commit/464824323e57dc4b397e8b05854d779908b55304 - public bool? Streaming { get; } + // and https://github.com/postgres/postgres/commit/216a784829c2c5f03ab0c43e009126cbb819e9b2 + public PgOutputStreamingMode? StreamingMode { get; } /// /// Write logical decoding messages into the replication stream @@ -100,13 +146,21 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr internal IEnumerable> GetOptionPairs() { - yield return new KeyValuePair("proto_version", ProtocolVersion.ToString(CultureInfo.InvariantCulture)); + yield return new KeyValuePair("proto_version", ((ulong)ProtocolVersion).ToString(CultureInfo.InvariantCulture)); yield return new KeyValuePair("publication_names", "\"" + string.Join("\",\"", PublicationNames) + "\""); if (Binary != null) yield return new KeyValuePair("binary", Binary.Value ? "on" : "off"); - if (Streaming != null) - yield return new KeyValuePair("streaming", Streaming.Value ? "on" : "off"); + if (StreamingMode != null) + { + yield return new KeyValuePair("streaming", StreamingMode.Value switch + { + PgOutputStreamingMode.Off => "off", + PgOutputStreamingMode.On => "on", + PgOutputStreamingMode.Parallel => "parallel", + _ => throw new ArgumentOutOfRangeException($"Unknown {nameof(PgOutputStreamingMode)} value: {StreamingMode.Value}") + }); + } if (Messages != null) yield return new KeyValuePair("messages", Messages.Value ? "on" : "off"); if (TwoPhase != null) @@ -118,25 +172,12 @@ public bool Equals(PgOutputReplicationOptions? 
other) => other != null && ( ReferenceEquals(this, other) || ProtocolVersion == other.ProtocolVersion && PublicationNames.Equals(other.PublicationNames) && Binary == other.Binary && - Streaming == other.Streaming && Messages == other.Messages && TwoPhase == other.TwoPhase); + StreamingMode == other.StreamingMode && Messages == other.Messages && TwoPhase == other.TwoPhase); /// public override bool Equals(object? obj) => obj is PgOutputReplicationOptions other && other.Equals(this); /// - public override int GetHashCode() - { -#if NETSTANDARD2_0 - var hashCode = ProtocolVersion.GetHashCode(); - hashCode = (hashCode * 397) ^ PublicationNames.GetHashCode(); - hashCode = (hashCode * 397) ^ Binary.GetHashCode(); - hashCode = (hashCode * 397) ^ Streaming.GetHashCode(); - hashCode = (hashCode * 397) ^ Messages.GetHashCode(); - hashCode = (hashCode * 397) ^ TwoPhase.GetHashCode(); - return hashCode; -#else - return HashCode.Combine(ProtocolVersion, PublicationNames, Binary, Streaming, Messages, TwoPhase); -#endif - } + public override int GetHashCode() => HashCode.Combine(ProtocolVersion, PublicationNames, Binary, StreamingMode, Messages, TwoPhase); } diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs index a873f585fc..c2517347f0 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs @@ -1,4 +1,4 @@ -using Npgsql.Replication.Internal; +using Npgsql.Replication.Internal; namespace Npgsql.Replication.PgOutput; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs b/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs new file mode 100644 index 0000000000..312f842c70 --- /dev/null +++ b/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs @@ -0,0 +1,23 @@ +namespace Npgsql.Replication.PgOutput; + +/// +/// Option to enable streaming of in-progress transactions. 
+/// Minimum protocol version 2 is required to turn it on. Minimum protocol version 4 is required for the "parallel" option. +/// +public enum PgOutputStreamingMode +{ + /// + /// Disable streaming of in-progress transactions + /// + Off, + + /// + /// Enable streaming of in-progress transactions + /// + On, + + /// + /// Enable streaming of in-progress transactions and enable sending extra information with some messages to be used for parallelisation + /// + Parallel +} diff --git a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs index 596dc471fb..dac1ecdea9 100644 --- a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs +++ b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs @@ -1,7 +1,6 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; -using Npgsql.Replication.PgOutput.Messages; namespace Npgsql.Replication.PgOutput; @@ -12,7 +11,7 @@ sealed class ReadOnlyArrayBuffer : IReadOnlyList int _size; public ReadOnlyArrayBuffer() - => _items = Array.Empty(); + => _items = []; ReadOnlyArrayBuffer(T[] items) { diff --git a/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs b/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs index 4ef1e6bf6b..43bd08b4ac 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs @@ -58,10 +58,10 @@ internal async Task Consume(CancellationToken cancellationToken) case RowState.NotRead: State = RowState.Reading; _tupleEnumerator.Reset(NumColumns, _rowDescription, cancellationToken); - while (await _tupleEnumerator.MoveNextAsync()) { } + while (await _tupleEnumerator.MoveNextAsync().ConfigureAwait(false)) { } break; case RowState.Reading: - while (await _tupleEnumerator.MoveNextAsync()) { } + while (await _tupleEnumerator.MoveNextAsync().ConfigureAwait(false)) { } break; case RowState.Consumed: return; @@ -76,4 +76,4 @@ enum RowState NotRead, Reading, Consumed -} 
\ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index 6ad0cbc6e1..6dc73f33a4 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -4,9 +4,7 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using Npgsql.Replication.PgOutput.Messages; namespace Npgsql.Replication.PgOutput; @@ -27,26 +25,21 @@ public class ReplicationValue /// public TupleDataKind Kind { get; private set; } - bool _columnConsumed; FieldDescription _fieldDescription = null!; + ReadConversionContext _lastConversionContext; + bool _isConsumed; - /// - /// A stream that has been opened on a column. - /// - readonly NpgsqlReadBuffer.ColumnStream _columnStream; + PgReader PgReader => _readBuffer.PgReader; - internal ReplicationValue(NpgsqlConnector connector) - { - _readBuffer = connector.ReadBuffer; - _columnStream = new NpgsqlReadBuffer.ColumnStream(connector, startCancellableOperations: false); - } + internal ReplicationValue(NpgsqlConnector connector) => _readBuffer = connector.ReadBuffer; internal void Reset(TupleDataKind kind, int length, FieldDescription fieldDescription) { Kind = kind; Length = length; _fieldDescription = fieldDescription; - _columnConsumed = false; + _lastConversionContext = default; + _isConsumed = false; } // ReSharper disable once InconsistentNaming @@ -83,6 +76,12 @@ public bool IsUnchangedToastedValue /// The data type of the specified column. public Type GetFieldType() => _fieldDescription.FieldType; + /// + /// Gets the name of the specified column. + /// + /// The name of the specified column. + public string GetFieldName() => _fieldDescription.Name; + /// /// Gets the value of the specified column as a type. 
/// @@ -91,19 +90,57 @@ public bool IsUnchangedToastedValue /// An optional token to cancel the asynchronous operation. The default value is . /// /// - public ValueTask Get(CancellationToken cancellationToken = default) + public ValueTask Get(CancellationToken cancellationToken = default) => GetAsyncCore(cancellationToken); + + /// + /// Gets the value of the specified column as an instance of . + /// + /// + /// An optional token to cancel the asynchronous operation. The default value is . + /// + /// + public ValueTask Get(CancellationToken cancellationToken = default) => GetAsyncCore(cancellationToken); + + /// + /// Retrieves data as a . + /// + public Stream GetStream() => GetCore(); + + /// + /// Retrieves data as a . + /// + public TextReader GetTextReader() => GetCore(); + + internal async Task Consume(CancellationToken cancellationToken) { - CheckAndMarkConsumed(); + if (_isConsumed) + return; + + var reader = PgReader; + if (!reader.Initialized) + reader.Init(_fieldDescription.DataFormat, Length); + await reader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + await reader.CommitAsync().ConfigureAwait(false); + + _isConsumed = true; + } + + T GetCore() + { + ThrowIfInitialized(); + + _fieldDescription.GetConversionContext(typeof(T), ref _lastConversionContext); + var conversionContext = _lastConversionContext; switch (Kind) { case TupleDataKind.Null: // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) + if (default(T) is null && typeof(T).IsValueType) return default!; if (typeof(T) == typeof(object)) - return new ValueTask((T)(object)DBNull.Value); + return (T)(object)DBNull.Value; ThrowHelper.ThrowInvalidCastException_NoValue(_fieldDescription); break; @@ -113,146 +150,46 @@ public ValueTask Get(CancellationToken cancellationToken = default) $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - using 
(NoSynchronizationContextScope.Enter()) - return GetCore(cancellationToken); - - async ValueTask GetCore(CancellationToken cancellationToken) - { - using var tokenRegistration = _readBuffer.ReadBytesLeft < Length - ? _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken) - : default; - - var position = _readBuffer.ReadPosition; - - try - { - return NullableHandler.Exists - ? await NullableHandler.ReadAsync(_fieldDescription.Handler, _readBuffer, Length, async: true, _fieldDescription) - : typeof(T) == typeof(object) - ? (T)await _fieldDescription.Handler.ReadAsObject(_readBuffer, Length, async: true, _fieldDescription) - : await _fieldDescription.Handler.Read(_readBuffer, Length, async: true, _fieldDescription); - } - catch - { - if (_readBuffer.Connector.State != ConnectorState.Broken) - { - var writtenBytes = _readBuffer.ReadPosition - position; - var remainingBytes = Length - writtenBytes; - if (remainingBytes > 0) - _readBuffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - - throw; - } - } + var reader = PgReader; + reader.Init(conversionContext.Binding.DataFormat, Length); + return conversionContext.TypeInfo.ReadFieldValue(PgReader, conversionContext.Binding); } - /// - /// Gets the value of the specified column as an instance of . - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - /// - public ValueTask Get(CancellationToken cancellationToken = default) + async ValueTask GetAsyncCore(CancellationToken cancellationToken) { - CheckAndMarkConsumed(); + ThrowIfInitialized(); + + _fieldDescription.GetConversionContext(typeof(T), ref _lastConversionContext); + var conversionContext = _lastConversionContext; switch (Kind) { case TupleDataKind.Null: - return new ValueTask(DBNull.Value); - - case TupleDataKind.UnchangedToastedValue: - throw new InvalidCastException( - $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); - } - - using (NoSynchronizationContextScope.Enter()) - return GetCore(cancellationToken); - - async ValueTask GetCore(CancellationToken cancellationToken) - { - using var tokenRegistration = _readBuffer.ReadBytesLeft < Length - ? _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken) - : default; - - var position = _readBuffer.ReadPosition; - - try - { - return await _fieldDescription.Handler.ReadAsObject(_readBuffer, Length, async: true, _fieldDescription); - } - catch - { - if (_readBuffer.Connector.State != ConnectorState.Broken) - { - var writtenBytes = _readBuffer.ReadPosition - position; - var remainingBytes = Length - writtenBytes; - if (remainingBytes > 0) - _readBuffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - - throw; - } - } - } + // When T is a Nullable (and only in that case), we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; - /// - /// Retrieves data as a . 
- /// - public Stream GetStream() - { - CheckAndMarkConsumed(); + if (typeof(T) == typeof(object)) + return (T)(object)DBNull.Value; - switch (Kind) - { - case TupleDataKind.Null: ThrowHelper.ThrowInvalidCastException_NoValue(_fieldDescription); break; case TupleDataKind.UnchangedToastedValue: - throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); + throw new InvalidCastException( + $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - _columnStream.Init(Length, canSeek: false); - return _columnStream; - } - - /// - /// Retrieves data as a . - /// - public TextReader GetTextReader() - => _fieldDescription.Handler is ITextReaderHandler handler - ? handler.GetTextReader(GetStream(), _readBuffer) - : throw new InvalidCastException( - $"The GetTextReader method is not supported for type {_fieldDescription.Handler.PgDisplayName}"); - - internal async Task Consume(CancellationToken cancellationToken) - { - if (!_columnStream.IsDisposed) - await _columnStream.DisposeAsync(); - - if (!_columnConsumed) - { - if (_readBuffer.ReadBytesLeft < 4) - { - using var tokenRegistration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken); - await _readBuffer.Skip(Length, async: true); - } - else - { - await _readBuffer.Skip(Length, async: true); - } - } + using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - _columnConsumed = true; + var reader = PgReader; + reader.Init(conversionContext.Binding.DataFormat, Length); + return await conversionContext.TypeInfo.ReadFieldValueAsync(PgReader, conversionContext.Binding, cancellationToken).ConfigureAwait(false); } - void CheckAndMarkConsumed() + void ThrowIfInitialized() { - if (_columnConsumed) + if (PgReader.Initialized) throw new InvalidOperationException("Column has already been consumed"); - _columnConsumed = true; } -} \ No 
newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/TupleDataKind.cs b/src/Npgsql/Replication/PgOutput/TupleDataKind.cs index 141e4af16e..6bd404a7d3 100644 --- a/src/Npgsql/Replication/PgOutput/TupleDataKind.cs +++ b/src/Npgsql/Replication/PgOutput/TupleDataKind.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput; +namespace Npgsql.Replication.PgOutput; /// /// The kind of data transmitted for a tuple in a Logical Replication Protocol message. diff --git a/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs b/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs index 95e5bfe293..cee25671af 100644 --- a/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs +++ b/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs @@ -4,7 +4,6 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Replication.PgOutput.Messages; namespace Npgsql.Replication.PgOutput; @@ -39,21 +38,20 @@ public ValueTask MoveNextAsync() if (_tupleEnumerable.State != RowState.Reading) throw new ObjectDisposedException(null); - using (NoSynchronizationContextScope.Enter()) - return MoveNextCore(); + return MoveNextCore(); async ValueTask MoveNextCore() { // Consume the previous column if (_pos != -1) - await _value.Consume(_cancellationToken); + await _value.Consume(_cancellationToken).ConfigureAwait(false); if (_pos + 1 == _numColumns) return false; _pos++; // Read the next column - await _readBuffer.Ensure(1, async: true); + await _readBuffer.Ensure(1, async: true).ConfigureAwait(false); var kind = (TupleDataKind)_readBuffer.ReadByte(); int len; switch (kind) @@ -64,11 +62,7 @@ async ValueTask MoveNextCore() break; case TupleDataKind.TextValue: case TupleDataKind.BinaryValue: - if (_readBuffer.ReadBytesLeft < 4) - { - using var tokenRegistration = _readBuffer.Connector.StartNestedCancellableOperation(_cancellationToken); - await _readBuffer.Ensure(4, async: true); - } + await _readBuffer.Ensure(4, async: true).ConfigureAwait(false); len = 
_readBuffer.ReadInt32(); break; default: @@ -92,8 +86,8 @@ async ValueTask MoveNextCore() public async ValueTask DisposeAsync() { if (_tupleEnumerable.State == RowState.Reading) - while (await MoveNextAsync()) { /* Do nothing, just iterate the enumerator */ } + while (await MoveNextAsync().ConfigureAwait(false)) { /* Do nothing, just iterate the enumerator */ } _tupleEnumerable.State = RowState.Consumed; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PhysicalReplicationConnection.cs b/src/Npgsql/Replication/PhysicalReplicationConnection.cs index d10d42e71b..bad56ff357 100644 --- a/src/Npgsql/Replication/PhysicalReplicationConnection.cs +++ b/src/Npgsql/Replication/PhysicalReplicationConnection.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Globalization; using System.Runtime.CompilerServices; @@ -50,31 +50,25 @@ public PhysicalReplicationConnection(string? connectionString) : base(connection /// A representing a that represents the /// newly-created replication slot. /// - public Task CreateReplicationSlot( + public async Task CreateReplicationSlot( string slotName, bool isTemporary = false, bool reserveWal = false, CancellationToken cancellationToken = default) { CheckDisposed(); - using var _ = NoSynchronizationContextScope.Enter(); - return CreatePhysicalReplicationSlot(slotName, isTemporary, reserveWal, cancellationToken); + var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); + if (isTemporary) + builder.Append(" TEMPORARY"); + builder.Append(" PHYSICAL"); + if (reserveWal) + builder.Append(PostgreSqlVersion.Major >= 15 ? 
" (RESERVE_WAL)" : " RESERVE_WAL"); - async Task CreatePhysicalReplicationSlot(string slotName, bool isTemporary, bool reserveWal, CancellationToken cancellationToken) - { - var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); - if (isTemporary) - builder.Append(" TEMPORARY"); - builder.Append(" PHYSICAL"); - if (reserveWal) - builder.Append(PostgreSqlVersion.Major >= 15 ? " (RESERVE_WAL)" : " RESERVE_WAL"); - - var command = builder.ToString(); + var command = builder.ToString(); - LogMessages.CreatingReplicationSlot(ReplicationLogger, slotName, command, Connector.Id); + LogMessages.CreatingReplicationSlot(ReplicationLogger, slotName, command, Connector.Id); - var slotOptions = await CreateReplicationSlot(builder.ToString(), cancellationToken); + var slotOptions = await CreateReplicationSlot(builder.ToString(), cancellationToken).ConfigureAwait(false); - return new PhysicalReplicationSlot(slotOptions.SlotName); - } + return new PhysicalReplicationSlot(slotOptions.SlotName); } /// @@ -92,10 +86,7 @@ async Task CreatePhysicalReplicationSlot(string slotNam /// A representing a or /// if the replication slot does not exist. public Task ReadReplicationSlot(string slotName, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ReadReplicationSlotInternal(slotName, cancellationToken); - } + => ReadReplicationSlotInternal(slotName, cancellationToken); /// /// Instructs the server to start streaming the WAL for physical replication, starting at WAL location @@ -119,15 +110,15 @@ async Task CreatePhysicalReplicationSlot(string slotNam public IAsyncEnumerable StartReplication(PhysicalReplicationSlot? 
slot, NpgsqlLogSequenceNumber walLocation, CancellationToken cancellationToken, - ulong timeline = default) + uint timeline = default) { - using (NoSynchronizationContextScope.Enter()) - return StartPhysicalReplication(slot, walLocation, cancellationToken, timeline); + return StartPhysicalReplication(slot, walLocation, cancellationToken, timeline); + // Local method to avoid having to add the EnumeratorCancellation attribute to the public signature. async IAsyncEnumerable StartPhysicalReplication(PhysicalReplicationSlot? slot, NpgsqlLogSequenceNumber walLocation, [EnumeratorCancellation] CancellationToken cancellationToken, - ulong timeline) + uint timeline) { var builder = new StringBuilder("START_REPLICATION"); if (slot != null) @@ -141,7 +132,7 @@ async IAsyncEnumerable StartPhysicalReplication(PhysicalReplica LogMessages.StartingPhysicalReplication(ReplicationLogger, slot?.Name, command, Connector.Id); var enumerator = StartReplicationInternalWrapper(command, bypassingStream: false, cancellationToken); - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync().ConfigureAwait(false)) yield return enumerator.Current; } } @@ -162,7 +153,7 @@ async IAsyncEnumerable StartPhysicalReplication(PhysicalReplica /// A representing an that /// can be used to stream WAL entries in form of instances. 
public IAsyncEnumerable StartReplication( - NpgsqlLogSequenceNumber walLocation, CancellationToken cancellationToken, ulong timeline = default) + NpgsqlLogSequenceNumber walLocation, CancellationToken cancellationToken, uint timeline = default) => StartReplication(slot: null, walLocation: walLocation, timeline: timeline, cancellationToken: cancellationToken); /// diff --git a/src/Npgsql/Replication/PhysicalReplicationSlot.cs b/src/Npgsql/Replication/PhysicalReplicationSlot.cs index 9bc1018207..4495b404c1 100644 --- a/src/Npgsql/Replication/PhysicalReplicationSlot.cs +++ b/src/Npgsql/Replication/PhysicalReplicationSlot.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; namespace Npgsql.Replication; @@ -17,7 +17,7 @@ public class PhysicalReplicationSlot : ReplicationSlot /// The name of the existing replication slot /// The replication slot's restart_lsn /// The timeline ID associated to restart_lsn, following the current timeline history. - public PhysicalReplicationSlot(string slotName, NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) + public PhysicalReplicationSlot(string slotName, NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) : base(slotName) { RestartLsn = restartLsn; @@ -32,5 +32,5 @@ public PhysicalReplicationSlot(string slotName, NpgsqlLogSequenceNumber? restart /// /// The timeline ID associated to restart_lsn, following the current timeline history. /// - public ulong? RestartTimeline { get; } -} \ No newline at end of file + public uint? 
RestartTimeline { get; } +} diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 6a09c13811..fb631b5c1c 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -1,4 +1,4 @@ -using Npgsql.BackendMessages; +using Npgsql.BackendMessages; using NpgsqlTypes; using System; using System.Collections.Generic; @@ -11,7 +11,6 @@ using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; using static Npgsql.Util.Statics; using Npgsql.Util; @@ -82,8 +81,8 @@ private protected ReplicationConnection(string? connectionString) : this() /// /// /// Since replication connections are a special kind of connection, - /// , , - /// and + /// , + /// and /// are always disabled no matter what you set them to in your connection string. /// [AllowNull] @@ -96,15 +95,10 @@ public string ConnectionString { { Pooling = false, Enlist = false, - Multiplexing = false, KeepAlive = 0, ReplicationMode = ReplicationMode }; - // Physical replication connections don't allow regular queries, so we can't load types from PG - if (ReplicationMode == ReplicationMode.Physical) - cs.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading; - _npgsqlConnection.ConnectionString = cs.ToString(); } } @@ -250,53 +244,47 @@ public async Task Open(CancellationToken cancellationToken = default) /// with freeing, releasing, or resetting its unmanaged resources asynchronously. /// /// A task that represents the asynchronous dispose operation. 
- public ValueTask DisposeAsync() + public async ValueTask DisposeAsync() { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncCore(); + if (_isDisposed) + return; - async ValueTask DisposeAsyncCore() + if (_npgsqlConnection.Connector?.State == ConnectorState.Replication) { - if (_isDisposed) - return; + Debug.Assert(_currentEnumerator is not null); + Debug.Assert(_replicationCancellationTokenSource is not null); - if (_npgsqlConnection.Connector?.State == ConnectorState.Replication) + // Replication is in progress; cancel it (soft or hard) and iterate the enumerator until we get the cancellation + // exception. Note: this isn't thread-safe: a user calling DisposeAsync and enumerating at the same time is violating + // our contract. + _replicationCancellationTokenSource.Cancel(); + try { - Debug.Assert(_currentEnumerator is not null); - Debug.Assert(_replicationCancellationTokenSource is not null); - - // Replication is in progress; cancel it (soft or hard) and iterate the enumerator until we get the cancellation - // exception. Note: this isn't thread-safe: a user calling DisposeAsync and enumerating at the same time is violating - // our contract. 
- _replicationCancellationTokenSource.Cancel(); - try - { - while (await _currentEnumerator.MoveNextAsync()) - { - // Do nothing with messages - simply enumerate until cancellation/termination - } - } - catch + while (await _currentEnumerator.MoveNextAsync().ConfigureAwait(false)) { - // Cancellation/termination occurred + // Do nothing with messages - simply enumerate until cancellation/termination } } - - Debug.Assert(_sendFeedbackTimer is null, "Send feedback timer isn't null at replication shutdown"); - Debug.Assert(_requestFeedbackTimer is null, "Request feedback timer isn't null at replication shutdown"); - _feedbackSemaphore.Dispose(); - - try - { - await _npgsqlConnection.Close(async: true); - } catch { - // Dispose + // Cancellation/termination occurred } + } + + Debug.Assert(_sendFeedbackTimer is null, "Send feedback timer isn't null at replication shutdown"); + Debug.Assert(_requestFeedbackTimer is null, "Request feedback timer isn't null at replication shutdown"); + _feedbackSemaphore.Dispose(); - _isDisposed = true; + try + { + await _npgsqlConnection.Close(async: true).ConfigureAwait(false); } + catch + { + // Dispose + } + + _isDisposed = true; } #endregion Open / Dispose @@ -312,19 +300,12 @@ async ValueTask DisposeAsyncCore() /// /// A containing information about the system we are connected to. 
/// - public Task IdentifySystem(CancellationToken cancellationToken = default) + public async Task IdentifySystem(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return IdentifySystemInternal(cancellationToken); - - async Task IdentifySystemInternal(CancellationToken cancellationToken) - { - var row = await ReadSingleRow("IDENTIFY_SYSTEM", cancellationToken); - return new ReplicationSystemIdentification( - (string)row[0], (uint)row[1], NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); - } + var row = await ReadSingleRow("IDENTIFY_SYSTEM", cancellationToken).ConfigureAwait(false); + return new ReplicationSystemIdentification( + (string)row[0], (uint)row[1], NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); } - /// /// Requests the server to send the current setting of a run-time parameter. /// This is similar to the SQL command SHOW. @@ -338,14 +319,12 @@ async Task IdentifySystemInternal(CancellationT /// The current setting of the run-time parameter specified in as . public Task Show(string parameterName, CancellationToken cancellationToken = default) { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); - using (NoSynchronizationContextScope.Enter()) - return ShowInternal(parameterName, cancellationToken); + return ShowInternal(parameterName, cancellationToken); async Task ShowInternal(string parameterName, CancellationToken cancellationToken) - => (string)(await ReadSingleRow("SHOW " + parameterName, cancellationToken))[0]; + => (string)(await ReadSingleRow("SHOW " + parameterName, cancellationToken).ConfigureAwait(false))[0]; } /// @@ -356,23 +335,17 @@ async Task ShowInternal(string parameterName, CancellationToken cancella /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// The timeline history file for timeline tli - public Task TimelineHistory(uint tli, CancellationToken cancellationToken = default) + public async Task TimelineHistory(uint tli, CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return TimelineHistoryInternal(tli, cancellationToken); - - async Task TimelineHistoryInternal(uint tli, CancellationToken cancellationToken) - { - var result = await ReadSingleRow($"TIMELINE_HISTORY {tli:D}", cancellationToken); - return new TimelineHistoryFile((string)result[0], (byte[])result[1]); - } + var result = await ReadSingleRow($"TIMELINE_HISTORY {tli:D}", cancellationToken).ConfigureAwait(false); + return new TimelineHistoryFile((string)result[0], (byte[])result[1]); } internal async Task CreateReplicationSlot(string command, CancellationToken cancellationToken = default) { try { - var result = await ReadSingleRow(command, cancellationToken); + var result = await ReadSingleRow(command, cancellationToken).ConfigureAwait(false); var slotName = (string)result[0]; var consistentPoint = (string)result[1]; var snapshotName = (string?)result[2]; @@ -407,7 +380,7 @@ internal async Task CreateReplicationSlot(string command internal async Task ReadReplicationSlotInternal(string slotName, CancellationToken cancellationToken = default) { - var result = await ReadSingleRow($"READ_REPLICATION_SLOT {slotName}", cancellationToken); + var result = await ReadSingleRow($"READ_REPLICATION_SLOT {slotName}", cancellationToken).ConfigureAwait(false); var slotType = (string?)result[0]; // Currently (2021-12-30) slot_type is always 'physical' for existing slots or null for slot names that don't exist but that @@ -416,7 +389,7 @@ internal async Task CreateReplicationSlot(string command { case "physical": var restartLsn = (string?)result[1]; - var restartTli = (ulong?)result[2]; + var restartTli = (uint?)result[2]; return new PhysicalReplicationSlot( slotName.ToLowerInvariant(), restartLsn == null ? 
null : NpgsqlLogSequenceNumber.Parse(restartLsn), @@ -449,17 +422,17 @@ internal async IAsyncEnumerator StartReplicationInternal( _replicationCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - using var _ = Connector.StartUserAction( + using var _ = connector.StartUserAction( ConnectorState.Replication, _replicationCancellationTokenSource.Token, attemptPgCancellation: _pgCancellationSupported); NpgsqlReadBuffer.ColumnStream? columnStream = null; try { - await connector.WriteQuery(command, true, cancellationToken); - await connector.Flush(true, cancellationToken); + await connector.WriteQuery(command, true, cancellationToken).ConfigureAwait(false); + await connector.Flush(true, cancellationToken).ConfigureAwait(false); - var msg = await connector.ReadMessage(true); + var msg = await connector.ReadMessage(true).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.CopyBothResponse: @@ -474,17 +447,19 @@ internal async IAsyncEnumerator StartReplicationInternal( var buf = connector.ReadBuffer; - // Cancellation is handled at the replication level - we don't want every ReadAsync - columnStream = new NpgsqlReadBuffer.ColumnStream(connector, startCancellableOperations: false); + columnStream = new NpgsqlReadBuffer.ColumnStream(connector); SetTimeouts(_walReceiverTimeout, CommandTimeout); - _sendFeedbackTimer = new Timer(TimerSendFeedback, state: null, WalReceiverStatusInterval, Timeout.InfiniteTimeSpan); - _requestFeedbackTimer = new Timer(TimerRequestFeedback, state: null, _requestFeedbackInterval, Timeout.InfiniteTimeSpan); + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + { + _sendFeedbackTimer = new Timer(TimerSendFeedback, state: null, WalReceiverStatusInterval, Timeout.InfiniteTimeSpan); + _requestFeedbackTimer = new Timer(TimerRequestFeedback, state: null, _requestFeedbackInterval, 
Timeout.InfiniteTimeSpan); + } while (true) { - msg = await Connector.ReadMessage(async: true); + msg = await connector.ReadMessage(async: true).ConfigureAwait(false); Expect(msg, Connector); // We received some message so there's no need to forcibly request feedback @@ -492,16 +467,16 @@ internal async IAsyncEnumerator StartReplicationInternal( _requestFeedbackTimer.Change(_requestFeedbackInterval, Timeout.InfiniteTimeSpan); var messageLength = ((CopyDataMessage)msg).Length; - await buf.EnsureAsync(1); + await buf.EnsureAsync(1).ConfigureAwait(false); var code = (char)buf.ReadByte(); switch (code) { case 'w': // XLogData { - await buf.EnsureAsync(24); + await buf.EnsureAsync(24).ConfigureAwait(false); var startLsn = buf.ReadUInt64(); var endLsn = buf.ReadUInt64(); - var sendTime = DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); + var sendTime = PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); if (unchecked((ulong)Interlocked.Read(ref _lastReceivedLsn)) < startLsn) Interlocked.Exchange(ref _lastReceivedLsn, unchecked((long)startLsn)); @@ -510,7 +485,7 @@ internal async IAsyncEnumerator StartReplicationInternal( // dataLen = msg.Length - (code = 1 + walStart = 8 + walEnd = 8 + serverClock = 8) var dataLen = messageLength - 25; - columnStream.Init(dataLen, canSeek: false); + columnStream.Init(dataLen, canSeek: false, commandScoped: false); _cachedXLogDataMessage.Populate(new NpgsqlLogSequenceNumber(startLsn), new NpgsqlLogSequenceNumber(endLsn), sendTime, columnStream); @@ -519,20 +494,20 @@ internal async IAsyncEnumerator StartReplicationInternal( // Our consumer may not have read the stream to the end, but it might as well have been us // ourselves bypassing the stream and reading directly from the buffer in StartReplication() if (!columnStream.IsDisposed && columnStream.Position < columnStream.Length && !bypassingStream) - await buf.Skip(columnStream.Length - columnStream.Position, true); + await buf.Skip(async: true, 
checked((int)(columnStream.Length - columnStream.Position))).ConfigureAwait(false); continue; } case 'k': // Primary keepalive message { - await buf.EnsureAsync(17); + await buf.EnsureAsync(17).ConfigureAwait(false); var end = buf.ReadUInt64(); if (ReplicationLogger.IsEnabled(LogLevel.Trace)) { var endLsn = new NpgsqlLogSequenceNumber(end); - var timestamp = DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); + var timestamp = PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); LogMessages.ReceivedReplicationPrimaryKeepalive(ReplicationLogger, endLsn, timestamp, Connector.Id); } else @@ -545,7 +520,7 @@ internal async IAsyncEnumerator StartReplicationInternal( if (replyRequested) { LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, "the server requested it", Connector.Id); - await SendFeedback(waitOnSemaphore: true, cancellationToken: CancellationToken.None); + await SendFeedback(waitOnSemaphore: true, cancellationToken: CancellationToken.None).ConfigureAwait(false); } continue; @@ -559,33 +534,12 @@ internal async IAsyncEnumerator StartReplicationInternal( finally { if (columnStream != null && !bypassingStream && !_replicationCancellationTokenSource.Token.IsCancellationRequested) - await columnStream.DisposeAsync(); - -#if NETSTANDARD2_0 - if (_sendFeedbackTimer != null) - { - var mre = new ManualResetEvent(false); - var actuallyDisposed = _sendFeedbackTimer.Dispose(mre); - Debug.Assert(actuallyDisposed, $"{nameof(_sendFeedbackTimer)} had already been disposed when completing replication"); - if (actuallyDisposed) - await mre.WaitOneAsync(cancellationToken); - } - - if (_requestFeedbackTimer != null) - { - var mre = new ManualResetEvent(false); - var actuallyDisposed = _requestFeedbackTimer.Dispose(mre); - Debug.Assert(actuallyDisposed, $"{nameof(_requestFeedbackTimer)} had already been disposed when completing replication"); - if (actuallyDisposed) - await mre.WaitOneAsync(cancellationToken); - } -#else + await 
columnStream.DisposeAsync().ConfigureAwait(false); if (_sendFeedbackTimer != null) - await _sendFeedbackTimer.DisposeAsync(); + await _sendFeedbackTimer.DisposeAsync().ConfigureAwait(false); if (_requestFeedbackTimer != null) - await _requestFeedbackTimer.DisposeAsync(); -#endif + await _requestFeedbackTimer.DisposeAsync().ConfigureAwait(false); _sendFeedbackTimer = null; _requestFeedbackTimer = null; @@ -626,31 +580,25 @@ public void SetReplicationStatus(NpgsqlLogSequenceNumber lastAppliedAndFlushedLs /// /// The connection currently isn't streaming /// A Task representing the sending of the status update (and not any PostgreSQL response). - public Task SendStatusUpdate(CancellationToken cancellationToken = default) + public async Task SendStatusUpdate(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return SendStatusUpdateInternal(cancellationToken); - - async Task SendStatusUpdateInternal(CancellationToken cancellationToken) - { - CheckDisposed(); - cancellationToken.ThrowIfCancellationRequested(); + CheckDisposed(); + cancellationToken.ThrowIfCancellationRequested(); - // TODO: If the user accidentally does concurrent usage of the connection, the following is vulnerable to race conditions. - // However, we generally aren't safe for this in Npgsql, leaving as-is for now. - if (Connector.State != ConnectorState.Replication) - throw new InvalidOperationException("Status update can only be sent during replication"); + // TODO: If the user accidentally does concurrent usage of the connection, the following is vulnerable to race conditions. + // However, we generally aren't safe for this in Npgsql, leaving as-is for now. 
+ if (Connector.State != ConnectorState.Replication) + throw new InvalidOperationException("Status update can only be sent during replication"); - LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, nameof(SendStatusUpdate) + "was called", Connector.Id); - await SendFeedback(waitOnSemaphore: true, cancellationToken: cancellationToken); - } + LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, nameof(SendStatusUpdate) + "was called", Connector.Id); + await SendFeedback(waitOnSemaphore: true, cancellationToken: cancellationToken).ConfigureAwait(false); } async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, CancellationToken cancellationToken = default) { var taken = waitOnSemaphore - ? await _feedbackSemaphore.WaitAsync(Timeout.Infinite, cancellationToken) - : await _feedbackSemaphore.WaitAsync(TimeSpan.Zero, cancellationToken); + ? await _feedbackSemaphore.WaitAsync(Timeout.Infinite, cancellationToken).ConfigureAwait(false) + : await _feedbackSemaphore.WaitAsync(TimeSpan.Zero, cancellationToken).ConfigureAwait(false); if (!taken) { @@ -666,8 +614,9 @@ async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, const int len = 39; if (buf.WriteSpaceLeft < len) - await connector.Flush(async: true, cancellationToken); + await connector.Flush(async: true, cancellationToken).ConfigureAwait(false); + buf.StartMessage(len); buf.WriteByte(FrontendMessageCode.CopyData); buf.WriteInt32(len - 1); buf.WriteByte((byte)'r'); // TODO: enum/const? @@ -679,10 +628,10 @@ async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, buf.WriteInt64(lastReceivedLsn); buf.WriteInt64(lastFlushedLsn); buf.WriteInt64(lastAppliedLsn); - buf.WriteInt64(DateTimeUtils.EncodeTimestamp(timestamp)); + buf.WriteInt64(PgDateTime.EncodeTimestamp(timestamp)); buf.WriteByte(requestReply ? 
(byte)1 : (byte)0); - await connector.Flush(async: true, cancellationToken); + await connector.Flush(async: true, cancellationToken).ConfigureAwait(false); if (ReplicationLogger.IsEnabled(LogLevel.Trace)) { @@ -718,7 +667,7 @@ async void TimerRequestFeedback(object? obj) if (ReplicationLogger.IsEnabled(LogLevel.Trace)) LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, $"half of the {nameof(WalReceiverTimeout)} of {WalReceiverTimeout} has expired", Connector.Id); - await SendFeedback(waitOnSemaphore: true, requestReply: true); + await SendFeedback(waitOnSemaphore: true, requestReply: true).ConfigureAwait(false); } catch { @@ -736,7 +685,7 @@ async void TimerSendFeedback(object? obj) if (ReplicationLogger.IsEnabled(LogLevel.Trace)) LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, $"{nameof(WalReceiverStatusInterval)} of {WalReceiverStatusInterval} has expired", Connector.Id); - await SendFeedback(); + await SendFeedback().ConfigureAwait(false); } catch { @@ -760,16 +709,14 @@ async void TimerSendFeedback(object? obj) /// A task representing the asynchronous drop operation. 
public Task DropReplicationSlot(string slotName, bool wait = false, CancellationToken cancellationToken = default) { - if (slotName is null) - throw new ArgumentNullException(nameof(slotName)); + ArgumentNullException.ThrowIfNull(slotName); - using (NoSynchronizationContextScope.Enter()) - return DropReplicationSlotInternal(slotName, wait, cancellationToken); + CheckDisposed(); + + return DropReplicationSlotInternal(slotName, wait, cancellationToken); async Task DropReplicationSlotInternal(string slotName, bool wait, CancellationToken cancellationToken) { - CheckDisposed(); - using var _ = Connector.StartUserAction(cancellationToken, attemptPgCancellation: _pgCancellationSupported); var command = "DROP_REPLICATION_SLOT " + slotName; @@ -778,16 +725,16 @@ async Task DropReplicationSlotInternal(string slotName, bool wait, CancellationT LogMessages.DroppingReplicationSlot(ReplicationLogger, slotName, command, Connector.Id); - await Connector.WriteQuery(command, true, CancellationToken.None); - await Connector.Flush(true, CancellationToken.None); + await Connector.WriteQuery(command, true, CancellationToken.None).ConfigureAwait(false); + await Connector.Flush(true, CancellationToken.None).ConfigureAwait(false); - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); // Two CommandComplete messages are returned if (PostgreSqlVersion < FirstVersionWithoutDropSlotDoubleCommandCompleteMessage) - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); } } @@ -801,45 +748,41 @@ async Task ReadSingleRow(string command, CancellationToken cancellatio LogMessages.ExecutingReplicationCommand(ReplicationLogger, command, Connector.Id); - await Connector.WriteQuery(command, true, cancellationToken); 
- await Connector.Flush(true, cancellationToken); + await Connector.WriteQuery(command, true, cancellationToken).ConfigureAwait(false); + await Connector.Flush(true, cancellationToken).ConfigureAwait(false); - var rowDescription = Expect(await Connector.ReadMessage(true), Connector); - Expect(await Connector.ReadMessage(true), Connector); + var rowDescription = Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); var buf = Connector.ReadBuffer; - await buf.EnsureAsync(2); + await buf.EnsureAsync(2).ConfigureAwait(false); var results = new object[buf.ReadInt16()]; for (var i = 0; i < results.Length; i++) { - await buf.EnsureAsync(4); + await buf.EnsureAsync(4).ConfigureAwait(false); var len = buf.ReadInt32(); if (len == -1) continue; - await buf.EnsureAsync(len); + await buf.EnsureAsync(len).ConfigureAwait(false); var field = rowDescription[i]; switch (field.PostgresType.Name) { case "text": results[i] = buf.ReadString(len); continue; + // Currently in all instances where ReadSingleRow gets called, we expect unsigned integer values only, since that's always + // TimeLineID which is a uint32 in PostgreSQL that is sent as integer up to PG 15 and as bigint as of PG 16 + // (https://github.com/postgres/postgres/blob/57d0051706b897048063acc14c2c3454200c488f/src/include/access/xlogdefs.h#L59 and + // https://github.com/postgres/postgres/commit/ec40f3422412cfdc140b5d3f67db7fd2dac0f1e2). + // Because of this, it is safe to always parse the values we get as unit although, according to the row description message + // we formally could also get a signed int or long value. + // Whenever ReadSingleRow gets used in a new context we have to check, whether this contract is still + // valid in that context and if it isn't, adjust the method accordingly (e.g. by switching on the command). 
case "integer": - { - var str = buf.ReadString(len); - if (!uint.TryParse(str, NumberStyles.None, null, out var num)) - { - throw Connector.Break( - new NpgsqlException( - $"Could not parse '{str}' as unsigned integer in field {field.Name}")); - } - - results[i] = num; - continue; - } case "bigint": { var str = buf.ReadString(len); - if (!ulong.TryParse(str, NumberStyles.None, null, out var num)) + if (!uint.TryParse(str, NumberStyles.None, null, out var num)) { throw Connector.Break( new NpgsqlException( @@ -872,8 +815,8 @@ async Task ReadSingleRow(string command, CancellationToken cancellatio } } - Expect(await Connector.ReadMessage(true), Connector); - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); return results; static byte[] ParseBytea(ReadOnlySpan bytes) @@ -946,7 +889,9 @@ static byte[] ParseByteaEscape(ReadOnlySpan inBytes) void SetTimeouts(TimeSpan readTimeout, TimeSpan writeTimeout) { var connector = Connector; - connector.UserTimeout = readTimeout > TimeSpan.Zero ? (int)readTimeout.TotalMilliseconds : 0; + var readBuffer = connector.ReadBuffer; + if (readBuffer != null) + readBuffer.Timeout = readTimeout > TimeSpan.Zero ? 
readTimeout : Timeout.InfiniteTimeSpan; var writeBuffer = connector.WriteBuffer; if (writeBuffer != null) @@ -958,4 +903,4 @@ internal void CheckDisposed() if (_isDisposed) throw new ObjectDisposedException(GetType().Name); } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/ReplicationMessage.cs b/src/Npgsql/Replication/ReplicationMessage.cs index be957346cb..4beac0d644 100644 --- a/src/Npgsql/Replication/ReplicationMessage.cs +++ b/src/Npgsql/Replication/ReplicationMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication; diff --git a/src/Npgsql/Replication/ReplicationSlot.cs b/src/Npgsql/Replication/ReplicationSlot.cs index 8790303444..ab4fb22f84 100644 --- a/src/Npgsql/Replication/ReplicationSlot.cs +++ b/src/Npgsql/Replication/ReplicationSlot.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Contains information about a newly-created replication slot. @@ -6,12 +6,10 @@ public abstract class ReplicationSlot { internal ReplicationSlot(string name) - { - Name = name; - } + => Name = name; /// /// The name of the newly-created replication slot. 
/// public string Name { get; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/ReplicationSlotOptions.cs b/src/Npgsql/Replication/ReplicationSlotOptions.cs index 669e8711df..93141d55df 100644 --- a/src/Npgsql/Replication/ReplicationSlotOptions.cs +++ b/src/Npgsql/Replication/ReplicationSlotOptions.cs @@ -1,4 +1,4 @@ -using System; +using System; using NpgsqlTypes; namespace Npgsql.Replication; diff --git a/src/Npgsql/Replication/ReplicationSystemIdentification.cs b/src/Npgsql/Replication/ReplicationSystemIdentification.cs index a3d91a6674..4bd59890f9 100644 --- a/src/Npgsql/Replication/ReplicationSystemIdentification.cs +++ b/src/Npgsql/Replication/ReplicationSystemIdentification.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; namespace Npgsql.Replication; @@ -35,4 +35,4 @@ internal ReplicationSystemIdentification(string systemId, uint timeline, NpgsqlL /// Database connected to. /// public string? DbName { get; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs index 406b39db7f..aca7ee70ea 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs @@ -33,17 +33,10 @@ internal TestDecodingAsyncEnumerable( _walLocation = walLocation; } - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - { - return StartReplicationInternal( - CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token); - } - } + cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token; - async IAsyncEnumerator StartReplicationInternal(CancellationToken 
cancellationToken) - { var stream = _connection.StartLogicalReplication( _slot, cancellationToken, _walLocation, _options.GetOptionPairs()); var encoding = _connection.Encoding!; @@ -52,7 +45,7 @@ async IAsyncEnumerator StartReplicationInternal(CancellationTo try { - await foreach (var msg in stream.WithCancellation(cancellationToken)) + await foreach (var msg in stream.ConfigureAwait(false)) { var len = (int)msg.Data.Length; Debug.Assert(msg.Data.Position == 0); @@ -65,7 +58,7 @@ async IAsyncEnumerator StartReplicationInternal(CancellationTo var offset = 0; while (offset < len) { - var read = await msg.Data.ReadAsync(buffer, offset, len - offset, CancellationToken.None); + var read = await msg.Data.ReadAsync(buffer, offset, len - offset, CancellationToken.None).ConfigureAwait(false); if (read == 0) throw new EndOfStreamException(); offset += read; @@ -82,4 +75,4 @@ async IAsyncEnumerator StartReplicationInternal(CancellationTo ArrayPool.Shared.Return(buffer); } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs index 77321711d9..a09d16b8e8 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using NpgsqlTypes; diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs index c887a015ad..178b0ba87e 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.TestDecoding; diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs 
b/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs index b0887a3885..4e90e19bb4 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; namespace Npgsql.Replication.TestDecoding; @@ -97,18 +97,5 @@ public override bool Equals(object? obj) /// public override int GetHashCode() - { -#if NETSTANDARD2_0 - var hashCode = IncludeXids.GetHashCode(); - hashCode = (hashCode * 397) ^ IncludeTimestamp.GetHashCode(); - hashCode = (hashCode * 397) ^ ForceBinary.GetHashCode(); - hashCode = (hashCode * 397) ^ SkipEmptyXacts.GetHashCode(); - hashCode = (hashCode * 397) ^ OnlyLocal.GetHashCode(); - hashCode = (hashCode * 397) ^ IncludeRewrites.GetHashCode(); - hashCode = (hashCode * 397) ^ StreamChanges.GetHashCode(); - return hashCode; -#else - return HashCode.Combine(IncludeXids, IncludeTimestamp, ForceBinary, SkipEmptyXacts, OnlyLocal, IncludeRewrites, StreamChanges); -#endif - } -} \ No newline at end of file + => HashCode.Combine(IncludeXids, IncludeTimestamp, ForceBinary, SkipEmptyXacts, OnlyLocal, IncludeRewrites, StreamChanges); +} diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs index cc5c52e5a4..9e1e5db5a0 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs @@ -1,4 +1,4 @@ -using Npgsql.Replication.Internal; +using Npgsql.Replication.Internal; namespace Npgsql.Replication.TestDecoding; diff --git a/src/Npgsql/Replication/TimelineHistoryFile.cs b/src/Npgsql/Replication/TimelineHistoryFile.cs index 89a15ffd69..44934d63e5 100644 --- a/src/Npgsql/Replication/TimelineHistoryFile.cs +++ b/src/Npgsql/Replication/TimelineHistoryFile.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; 
/// /// Represents a PostgreSQL timeline history file diff --git a/src/Npgsql/Replication/XLogDataMessage.cs b/src/Npgsql/Replication/XLogDataMessage.cs index 6b4ecd6dcf..55e8c7ebab 100644 --- a/src/Npgsql/Replication/XLogDataMessage.cs +++ b/src/Npgsql/Replication/XLogDataMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using NpgsqlTypes; diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 835cfbe424..d9b1f77b7d 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -1,16 +1,17 @@ -using System; +using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; -using System.Linq; +using System.Data.Common; using System.Threading; using System.Threading.Tasks; using System.Transactions; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandlers.CompositeHandlers; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; using Npgsql.Util; +using NpgsqlTypes; namespace Npgsql.Schema; @@ -30,77 +31,79 @@ internal DbColumnSchemaGenerator(NpgsqlConnection connection, RowDescriptionMess #region Columns queries static string GenerateColumnsQuery(Version pgVersion, string columnFieldFilter) => - $@"SELECT - typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, - {(pgVersion.IsGreaterOrEqual(10) ? 
"attidentity != ''" : "FALSE")} AS isidentity, - CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, - CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, - CASE WHEN col.is_updatable = 'YES' THEN true ELSE false END AS is_updatable, - EXISTS ( - SELECT * FROM pg_index - WHERE pg_index.indrelid = cls.oid AND - pg_index.indisprimary AND - attnum = ANY (indkey) - ) AS isprimarykey, - EXISTS ( - SELECT * FROM pg_index - WHERE pg_index.indrelid = cls.oid AND - pg_index.indisunique AND - pg_index.{(pgVersion.IsGreaterOrEqual(11) ? "indnkeyatts" : "indnatts")} = 1 AND - attnum = pg_index.indkey[0] - ) AS isunique + $""" +SELECT + typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, + {(pgVersion.IsGreaterOrEqual(10) ? "attidentity != ''" : "FALSE")} AS isidentity, + CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, + CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, + ((cls.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])) + OR ((cls.relkind = ANY (ARRAY['v'::"char", 'f'::"char"])) + AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) + {(pgVersion.IsGreaterOrEqual(10) ? "AND attr.attidentity NOT IN ('a')" : "")} + AS is_updatable, + EXISTS ( + SELECT * FROM pg_index + WHERE pg_index.indrelid = cls.oid AND + pg_index.indisprimary AND + attnum = ANY (indkey) + ) AS isprimarykey, + EXISTS ( + SELECT * FROM pg_index + WHERE pg_index.indrelid = cls.oid AND + pg_index.indisunique AND + pg_index.{(pgVersion.IsGreaterOrEqual(11) ? 
"indnkeyatts" : "indnatts")} = 1 AND + attnum = pg_index.indkey[0] + ) AS isunique FROM pg_attribute AS attr JOIN pg_type AS typ ON attr.atttypid = typ.oid JOIN pg_class AS cls ON cls.oid = attr.attrelid JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace -LEFT OUTER JOIN information_schema.columns AS col ON col.table_schema = nspname AND - col.table_name = relname AND - col.column_name = attname WHERE - atttypid <> 0 AND - relkind IN ('r', 'v', 'm') AND - NOT attisdropped AND - nspname NOT IN ('pg_catalog', 'information_schema') AND - attnum > 0 AND - ({columnFieldFilter}) -ORDER BY attnum"; + atttypid <> 0 AND + relkind IN ('r', 'v', 'm', 'p') AND + NOT attisdropped AND + nspname NOT IN ('pg_catalog', 'information_schema') AND + attnum > 0 AND + ({columnFieldFilter}) +ORDER BY attnum +"""; /// /// Stripped-down version of , mainly to support Amazon Redshift. /// static string GenerateOldColumnsQuery(string columnFieldFilter) => - $@"SELECT - typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, - CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, - CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, - TRUE AS is_updatable, /* Supported only since PG 8.2 */ - FALSE AS isprimarykey, /* Can't do ANY() on pg_index.indkey which is int2vector */ - FALSE AS isunique /* Can't do ANY() on pg_index.indkey which is int2vector */ + $""" +SELECT + typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, + CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, + CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, + TRUE AS is_updatable, /* Supported only since PG 8.2 */ + FALSE AS isprimarykey, /* Can't do ANY() on pg_index.indkey which is int2vector */ + FALSE AS isunique /* Can't do ANY() 
on pg_index.indkey which is int2vector */ FROM pg_attribute AS attr JOIN pg_type AS typ ON attr.atttypid = typ.oid JOIN pg_class AS cls ON cls.oid = attr.attrelid JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace -LEFT OUTER JOIN information_schema.columns AS col ON col.table_schema = nspname AND - col.table_name = relname AND - col.column_name = attname WHERE - atttypid <> 0 AND - relkind IN ('r', 'v', 'm') AND - NOT attisdropped AND - nspname NOT IN ('pg_catalog', 'information_schema') AND - attnum > 0 AND - ({columnFieldFilter}) -ORDER BY attnum"; + atttypid <> 0 AND + relkind IN ('r', 'v', 'm') AND + NOT attisdropped AND + nspname NOT IN ('pg_catalog', 'information_schema') AND + attnum > 0 AND + ({columnFieldFilter}) +ORDER BY attnum +"""; #endregion Column queries - internal async Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) + internal async Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) where T : DbColumn { // This is mainly for Amazon Redshift var oldQueryMode = _connection.PostgreSqlVersion < new Version(8, 2); var numFields = _rowDescription.Count; - var result = new List(numFields); + var result = new List(numFields); for (var i = 0; i < numFields; i++) result.Add(null); var populatedColumns = 0; @@ -111,31 +114,36 @@ internal async Task> GetColumnSchema(bool asy // and those that don't (e.g. SELECT 8). 
For the former we load lots of info from // the backend (if fetchAdditionalInfo is true), for the latter we only have the RowDescription - var columnFieldFilter = _rowDescription - .Where(f => f.TableOID != 0) // Only column fields - .Select(c => $"(attr.attrelid={c.TableOID} AND attr.attnum={c.ColumnAttributeNumber})") - .Join(" OR "); - + var filters = new List(); + for (var index = 0; index < _rowDescription.Count; index++) + { + var f = _rowDescription[index]; + // Only column fields + if (f.TableOID != 0) + filters.Add($"(attr.attrelid={f.TableOID} AND attr.attnum={f.ColumnAttributeNumber})"); + } + + var columnFieldFilter = string.Join(" OR ", filters); if (columnFieldFilter != string.Empty) { var query = oldQueryMode ? GenerateOldColumnsQuery(columnFieldFilter) : GenerateColumnsQuery(_connection.PostgreSqlVersion, columnFieldFilter); - + using var scope = new TransactionScope( TransactionScopeOption.Suppress, async ? TransactionScopeAsyncFlowOption.Enabled : TransactionScopeAsyncFlowOption.Suppress); using var connection = (NpgsqlConnection)((ICloneable)_connection).Clone(); - - await connection.Open(async, cancellationToken); + + await connection.Open(async, cancellationToken).ConfigureAwait(false); using var cmd = new NpgsqlCommand(query, connection); - var reader = await cmd.ExecuteReader(CommandBehavior.Default, async, cancellationToken); + var reader = await cmd.ExecuteReader(async, CommandBehavior.Default, cancellationToken).ConfigureAwait(false); try { - while (async ? await reader.ReadAsync(cancellationToken) : reader.Read()) + while (async ? 
await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read()) { - var column = LoadColumnDefinition(reader, _connection.Connector!.TypeMapper.DatabaseInfo, oldQueryMode); + var column = LoadColumnDefinition(reader, _connection.Connector!.DatabaseInfo, oldQueryMode); for (var ordinal = 0; ordinal < numFields; ordinal++) { var field = _rowDescription[ordinal]; @@ -149,7 +157,7 @@ internal async Task> GetColumnSchema(bool asy // The column's ordinal is with respect to the resultset, not its table column.ColumnOrdinal = ordinal; - result[ordinal] = column; + result[ordinal] = (T?)(object)column; } } } @@ -157,7 +165,7 @@ internal async Task> GetColumnSchema(bool asy finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -168,14 +176,14 @@ internal async Task> GetColumnSchema(bool asy // Fill in whatever info we have from the RowDescription itself for (var i = 0; i < numFields; i++) { - var column = result[i]; + var column = (NpgsqlDbColumn?)(object?)result[i]; var field = _rowDescription[i]; if (column is null) { column = SetUpNonColumnField(field); column.ColumnOrdinal = i; - result[i] = column; + result[i] = (T?)(object)column; populatedColumns++; } @@ -253,19 +261,19 @@ NpgsqlDbColumn SetUpNonColumnField(FieldDescription field) /// void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) { - var typeMapper = _connection.Connector!.TypeMapper; - - column.NpgsqlDbType = typeMapper.GetTypeInfoByOid(column.TypeOID).npgsqlDbType; - column.DataType = typeMapper.TryResolveByOID(column.TypeOID, out var handler) - ? 
handler.GetFieldType() - : null; + var serializerOptions = _connection.Connector!.SerializerOptions; - if (column.DataType != null) + // Call GetRepresentationalType to also handle domain types + // Because NpgsqlCommandBuilder relies on NpgsqlDbType for correct type mapping + // And otherwise we'll get NpgsqlDbType.Unknown + column.NpgsqlDbType = column.PostgresType.GetRepresentationalType().DataTypeName.ToNpgsqlDbType(); + if (serializerOptions.GetTypeInfo(typeof(object), serializerOptions.ToCanonicalTypeId(column.PostgresType)) is { } typeInfo) { - column.IsLong = handler is ByteaHandler; + column.DataType = typeInfo.Type; + column.IsLong = column.PostgresType.DataTypeName == DataTypeNames.Bytea; - if (handler is ICompositeHandler) - column.UdtAssemblyQualifiedName = column.DataType.AssemblyQualifiedName; + if (column.PostgresType is PostgresCompositeType) + column.UdtAssemblyQualifiedName = typeInfo.Type.AssemblyQualifiedName; } var facets = column.PostgresType.GetFacets(typeModifier); @@ -276,4 +284,4 @@ void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) if (facets.Scale != null) column.NumericScale = facets.Scale; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Schema/NpgsqlDbColumn.cs b/src/Npgsql/Schema/NpgsqlDbColumn.cs index 4b118e97f6..9ba8b04312 100644 --- a/src/Npgsql/Schema/NpgsqlDbColumn.cs +++ b/src/Npgsql/Schema/NpgsqlDbColumn.cs @@ -1,6 +1,5 @@ -using System; +using System; using System.Data.Common; -using System.Runtime.CompilerServices; using Npgsql.PostgresTypes; using NpgsqlTypes; @@ -32,7 +31,7 @@ public NpgsqlDbColumn() } internal NpgsqlDbColumn Clone() => - Unsafe.As(MemberwiseClone()); + (NpgsqlDbColumn)MemberwiseClone(); #region Standard fields // ReSharper disable once InconsistentNaming @@ -232,4 +231,4 @@ public override object? 
this[string propertyName] }; #endregion Npgsql-specific fields -} \ No newline at end of file +} diff --git a/src/Npgsql/Shims/Batching.cs b/src/Npgsql/Shims/Batching.cs deleted file mode 100644 index c8e7ddec1c..0000000000 --- a/src/Npgsql/Shims/Batching.cs +++ /dev/null @@ -1,130 +0,0 @@ -#if !NET6_0_OR_GREATER -using System.Collections; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; - -#pragma warning disable 1591,RS0016 - -// ReSharper disable once CheckNamespace -namespace System.Data.Common -{ - public abstract class DbBatch : IDisposable, IAsyncDisposable - { - public DbBatchCommandCollection BatchCommands => DbBatchCommands; - - protected abstract DbBatchCommandCollection DbBatchCommands { get; } - - public abstract int Timeout { get; set; } - - public DbConnection? Connection - { - get => DbConnection; - set => DbConnection = value; - } - - protected abstract DbConnection? DbConnection { get; set; } - - public DbTransaction? Transaction - { - get => DbTransaction; - set => DbTransaction = value; - } - - protected abstract DbTransaction? DbTransaction { get; set; } - - public DbDataReader ExecuteReader(CommandBehavior behavior = CommandBehavior.Default) - => ExecuteDbDataReader(behavior); - - protected abstract DbDataReader ExecuteDbDataReader(CommandBehavior behavior); - - public Task ExecuteReaderAsync(CancellationToken cancellationToken = default) - => ExecuteDbDataReaderAsync(CommandBehavior.Default, cancellationToken); - - public Task ExecuteReaderAsync( - CommandBehavior behavior, - CancellationToken cancellationToken = default) - => ExecuteDbDataReaderAsync(behavior, cancellationToken); - - protected abstract Task ExecuteDbDataReaderAsync( - CommandBehavior behavior, - CancellationToken cancellationToken); - - public abstract int ExecuteNonQuery(); - - public abstract Task ExecuteNonQueryAsync(CancellationToken cancellationToken = default); - - public abstract object? 
ExecuteScalar(); - - public abstract Task ExecuteScalarAsync(CancellationToken cancellationToken = default); - - public abstract void Prepare(); - - public abstract Task PrepareAsync(CancellationToken cancellationToken = default); - - public abstract void Cancel(); - - public DbBatchCommand CreateBatchCommand() => CreateDbBatchCommand(); - - protected abstract DbBatchCommand CreateDbBatchCommand(); - - public virtual void Dispose() {} - - public virtual ValueTask DisposeAsync() - { - Dispose(); - return default; - } - } - - public abstract class DbBatchCommand - { - public abstract string CommandText { get; set; } - - public abstract CommandType CommandType { get; set; } - - public abstract int RecordsAffected { get; } - - public DbParameterCollection Parameters => DbParameterCollection; - - protected abstract DbParameterCollection DbParameterCollection { get; } - } - - public abstract class DbBatchCommandCollection : IList - { - public abstract IEnumerator GetEnumerator(); - - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - - public abstract void Add(DbBatchCommand item); - - public abstract void Clear(); - - public abstract bool Contains(DbBatchCommand item); - - public abstract void CopyTo(DbBatchCommand[] array, int arrayIndex); - - public abstract bool Remove(DbBatchCommand item); - - public abstract int Count { get; } - - public abstract bool IsReadOnly { get; } - - public abstract int IndexOf(DbBatchCommand item); - - public abstract void Insert(int index, DbBatchCommand item); - - public abstract void RemoveAt(int index); - - public DbBatchCommand this[int index] - { - get => GetBatchCommand(index); - set => SetBatchCommand(index, value); - } - - protected abstract DbBatchCommand GetBatchCommand(int index); - - protected abstract void SetBatchCommand(int index, DbBatchCommand batchCommand); - } -} -#endif diff --git a/src/Npgsql/Shims/DbDataReaderExtensions.cs b/src/Npgsql/Shims/DbDataReaderExtensions.cs deleted file mode 100644 index 
5b56c31f55..0000000000 --- a/src/Npgsql/Shims/DbDataReaderExtensions.cs +++ /dev/null @@ -1,22 +0,0 @@ -#if NETSTANDARD2_0 - -#pragma warning disable 1591 - -using System.Data.Common; - -// ReSharper disable once CheckNamespace -namespace System.Data -{ - static class DataReaderExtensions - { - public static char GetChar(this DbDataReader reader, string name) - => reader.GetChar(reader.GetOrdinal(name)); - - public static string GetString(this DbDataReader reader, string name) - => reader.GetString(reader.GetOrdinal(name)); - - public static bool IsDBNull(this DbDataReader reader, string name) - => reader.IsDBNull(reader.GetOrdinal(name)); - } -} -#endif diff --git a/src/Npgsql/Shims/DbDataSource.cs b/src/Npgsql/Shims/DbDataSource.cs deleted file mode 100644 index fd720bb65b..0000000000 --- a/src/Npgsql/Shims/DbDataSource.cs +++ /dev/null @@ -1,70 +0,0 @@ -#if !NET7_0_OR_GREATER - -using System.Threading; -using System.Threading.Tasks; - -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member (compatibility shim for old TFMs) - -// ReSharper disable once CheckNamespace -namespace System.Data.Common; - -public abstract class DbDataSource : IDisposable, IAsyncDisposable -{ - public abstract string ConnectionString { get; } - - protected abstract DbConnection CreateDbConnection(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. - protected virtual DbConnection OpenDbConnection() - => throw new NotSupportedException(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. - protected virtual ValueTask OpenDbConnectionAsync(CancellationToken cancellationToken = default) - => throw new NotSupportedException(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. 
- protected virtual DbCommand CreateDbCommand(string? commandText = null) - => throw new NotSupportedException(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. - protected virtual DbBatch CreateDbBatch() - => throw new NotSupportedException(); - - public DbConnection CreateConnection() - => CreateDbConnection(); - - public DbConnection OpenConnection() - => OpenDbConnection(); - - public ValueTask OpenConnectionAsync(CancellationToken cancellationToken = default) - => OpenDbConnectionAsync(cancellationToken); - - public DbCommand CreateCommand(string? commandText = null) - => CreateDbCommand(); - - public DbBatch CreateBatch() - => CreateDbBatch(); - - public void Dispose() - { - Dispose(disposing: true); - GC.SuppressFinalize(this); - } - - public async ValueTask DisposeAsync() - { - await DisposeAsyncCore().ConfigureAwait(false); - - Dispose(disposing: false); - GC.SuppressFinalize(this); - } - - protected virtual void Dispose(bool disposing) - { - } - - protected virtual ValueTask DisposeAsyncCore() - => default; -} - -#endif \ No newline at end of file diff --git a/src/Npgsql/Shims/DictonaryExtensions.cs b/src/Npgsql/Shims/DictonaryExtensions.cs deleted file mode 100644 index a13397a39e..0000000000 --- a/src/Npgsql/Shims/DictonaryExtensions.cs +++ /dev/null @@ -1,19 +0,0 @@ -#if NETSTANDARD2_0 -// ReSharper disable once CheckNamespace -namespace System.Collections.Generic; - -// Helpers for Dictionary before netstandard 2.1 -static class DictonaryExtensions -{ - public static bool TryAdd(this Dictionary dictionary, TKey key, TValue value) - { - if (!dictionary.ContainsKey(key)) - { - dictionary.Add(key, value); - return true; - } - - return false; - } -} -#endif diff --git a/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs b/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs deleted file mode 100644 index c805e984a5..0000000000 --- 
a/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs +++ /dev/null @@ -1,17 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Runtime.CompilerServices; -using System.Text; - -namespace Npgsql.Netstandard20; - -static class ReadOnlySpanOfCharExtensions -{ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int ParseInt(this ReadOnlySpan span) - => int.Parse(span -#if NETSTANDARD2_0 - .ToString() -#endif - ); -} \ No newline at end of file diff --git a/src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs b/src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs deleted file mode 100644 index 4f7673959f..0000000000 --- a/src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs +++ /dev/null @@ -1,48 +0,0 @@ -#if !NET6_0_OR_GREATER - -// ReSharper disable once CheckNamespace -namespace System.Runtime.Versioning; - -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member -#pragma warning disable RS0016 // Add public types and members to the declared API - -[AttributeUsage(AttributeTargets.Assembly | - AttributeTargets.Module | - AttributeTargets.Class | - AttributeTargets.Interface | - AttributeTargets.Delegate | - AttributeTargets.Struct | - AttributeTargets.Enum | - AttributeTargets.Constructor | - AttributeTargets.Method | - AttributeTargets.Property | - AttributeTargets.Field | - AttributeTargets.Event, Inherited = false)] -public sealed class RequiresPreviewFeaturesAttribute : Attribute -{ - /// - /// Initializes a new instance of the class. - /// - public RequiresPreviewFeaturesAttribute() { } - - /// - /// Initializes a new instance of the class with the specified message. - /// - /// An optional message associated with this attribute instance. - public RequiresPreviewFeaturesAttribute(string? message) - { - Message = message; - } - - /// - /// Returns the optional message associated with this attribute instance. - /// - public string? 
Message { get; } - - /// - /// Returns the optional URL associated with this attribute instance. - /// - public string? Url { get; set; } -} - -#endif \ No newline at end of file diff --git a/src/Npgsql/Shims/StreamExtensions.cs b/src/Npgsql/Shims/StreamExtensions.cs deleted file mode 100644 index 925061870d..0000000000 --- a/src/Npgsql/Shims/StreamExtensions.cs +++ /dev/null @@ -1,71 +0,0 @@ -#if NETSTANDARD2_0 -using System.Buffers; -using System.Threading; -using System.Threading.Tasks; - -// ReSharper disable once CheckNamespace -namespace System.IO -{ - // Helpers to read/write Span/Memory to Stream before netstandard 2.1 - static class StreamExtensions - { - public static int Read(this Stream stream, Span buffer) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - try - { - var numRead = stream.Read(sharedBuffer, 0, buffer.Length); - new Span(sharedBuffer, 0, numRead).CopyTo(buffer); - return numRead; - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - - public static async ValueTask ReadAsync(this Stream stream, Memory buffer, CancellationToken cancellationToken = default) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - try - { - var result = await stream.ReadAsync(sharedBuffer, 0, buffer.Length, cancellationToken); - new Span(sharedBuffer, 0, result).CopyTo(buffer.Span); - return result; - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - - public static void Write(this Stream stream, ReadOnlySpan buffer) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - try - { - buffer.CopyTo(sharedBuffer); - stream.Write(sharedBuffer, 0, buffer.Length); - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - - public static async ValueTask WriteAsync(this Stream stream, ReadOnlyMemory buffer, CancellationToken cancellationToken = default) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - buffer.Span.CopyTo(sharedBuffer); - try - { - await 
stream.WriteAsync(sharedBuffer, 0, buffer.Length, cancellationToken); - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - } -} -#endif diff --git a/src/Npgsql/Shims/StringBuilderExtensions.cs b/src/Npgsql/Shims/StringBuilderExtensions.cs deleted file mode 100644 index 8d152be563..0000000000 --- a/src/Npgsql/Shims/StringBuilderExtensions.cs +++ /dev/null @@ -1,33 +0,0 @@ -#if NETSTANDARD2_0 - -// ReSharper disable once CheckNamespace -namespace System.Text -{ - /// - /// A set of extension methods to to allow runtime compatibility. - /// - static class StringBuilderExtensions - { - /// - /// Appends the provided to the . - /// - /// The to append to. - /// The to append. - public static StringBuilder Append(this StringBuilder stringBuilder, ReadOnlySpan span) - { - if (span.Length > 0) - { - unsafe - { - fixed (char* value = &span.GetPinnableReference()) - { - return stringBuilder.Append(value, span.Length); - } - } - } - - return stringBuilder; - } - } -} -#endif diff --git a/src/Npgsql/Shims/TaskExtensions.cs b/src/Npgsql/Shims/TaskExtensions.cs deleted file mode 100644 index 300b0bc541..0000000000 --- a/src/Npgsql/Shims/TaskExtensions.cs +++ /dev/null @@ -1,65 +0,0 @@ -#if !NET6_0_OR_GREATER -using System.Collections.Generic; - -namespace System.Threading.Tasks; - -static class TaskExtensions -{ - /// - /// Gets a that will complete when this completes, when the specified timeout expires, or when the specified has cancellation requested. - /// - /// The representing the asynchronous wait. - /// The timeout after which the should be faulted with a if it hasn't otherwise completed. - /// The to monitor for a cancellation request. - /// The representing the asynchronous wait. - /// This method reproduces new to the .NET 6.0 API .WaitAsync. - public static async Task WaitAsync(this Task task, TimeSpan timeout, CancellationToken cancellationToken) - { - var tasks = new List(3); - - Task? 
cancellationTask = default; - CancellationTokenRegistration registration = default; - if (cancellationToken.CanBeCanceled) - { - var tcs = new TaskCompletionSource(); - registration = cancellationToken.Register(s => ((TaskCompletionSource)s!).TrySetResult(true), tcs); - cancellationTask = tcs.Task; - tasks.Add(cancellationTask); - } - - Task? delayTask = default; - CancellationTokenSource? delayCts = default; - if (timeout != Timeout.InfiniteTimeSpan) - { - var timeLeft = timeout; - delayCts = new CancellationTokenSource(); - delayTask = Task.Delay(timeLeft, delayCts.Token); - tasks.Add(delayTask); - } - - try - { - if (tasks.Count != 0) - { - tasks.Add(task); - var result = await Task.WhenAny(tasks); - if (result == cancellationTask) - { - task = Task.FromCanceled(cancellationToken); - } - else if (result == delayTask) - { - task = Task.FromException(new TimeoutException()); - } - } - await task; - } - finally - { - delayCts?.Cancel(); - delayCts?.Dispose(); - registration.Dispose(); - } - } -} -#endif diff --git a/src/Npgsql/Shims/UnixDomainSocketEndPoint.cs b/src/Npgsql/Shims/UnixDomainSocketEndPoint.cs deleted file mode 100644 index 6135590493..0000000000 --- a/src/Npgsql/Shims/UnixDomainSocketEndPoint.cs +++ /dev/null @@ -1,89 +0,0 @@ -#if NETSTANDARD2_0 -using System.Net.Sockets; -using System.Text; - -// ReSharper disable once CheckNamespace -namespace System.Net -{ - // Copied and adapted from https://github.com/mono/mono/blob/master/mcs/class/Mono.Posix/Mono.Unix/UnixEndPoint.cs - sealed class UnixDomainSocketEndPoint : EndPoint - { - string _filename; - - public UnixDomainSocketEndPoint (string filename) - { - if (filename == null) - throw new ArgumentNullException(nameof(filename)); - if (filename == "") - throw new ArgumentException ("Cannot be empty.", nameof(filename)); - _filename = filename; - } - - public string Filename { - get => _filename; - set => _filename = value; - } - - public override AddressFamily AddressFamily => AddressFamily.Unix; - - 
public override EndPoint Create(SocketAddress socketAddress) - { - /* - * Should also check this - * - int addr = (int) AddressFamily.Unix; - if (socketAddress [0] != (addr & 0xFF)) - throw new ArgumentException ("socketAddress is not a unix socket address."); - if (socketAddress [1] != ((addr & 0xFF00) >> 8)) - throw new ArgumentException ("socketAddress is not a unix socket address."); - */ - - if (socketAddress.Size == 2) { - // Empty filename. - // Probably from RemoteEndPoint which on linux does not return the file name. - return new UnixDomainSocketEndPoint("a") { _filename = "" }; - } - var size = socketAddress.Size - 2; - var bytes = new byte[size]; - for (var i = 0; i < bytes.Length; i++) { - bytes[i] = socketAddress[i + 2]; - // There may be junk after the null terminator, so ignore it all. - if (bytes[i] == 0) { - size = i; - break; - } - } - - var name = Encoding.UTF8.GetString(bytes, 0, size); - return new UnixDomainSocketEndPoint(name); - } - - public override SocketAddress Serialize() - { - var bytes = Encoding.UTF8.GetBytes(_filename); - var sa = new SocketAddress(AddressFamily, 2 + bytes.Length + 1); - // sa [0] -> family low byte, sa [1] -> family high byte - for (var i = 0; i < bytes.Length; i++) - sa[2 + i] = bytes[i]; - - //NULL suffix for non-abstract path - sa[2 + bytes.Length] = 0; - - return sa; - } - - public override string ToString() => _filename; - - public override int GetHashCode () => _filename.GetHashCode(); - - public override bool Equals(object? 
o) - { - var other = o as UnixDomainSocketEndPoint; - if (other == null) - return false; - - return (other._filename == _filename); - } - } -} -#endif diff --git a/src/Npgsql/Shims/WaitHandleExtensions.cs b/src/Npgsql/Shims/WaitHandleExtensions.cs deleted file mode 100644 index dbb3cc4259..0000000000 --- a/src/Npgsql/Shims/WaitHandleExtensions.cs +++ /dev/null @@ -1,42 +0,0 @@ -#if NETSTANDARD2_0 -using System.Threading.Tasks; - -// ReSharper disable once CheckNamespace -namespace System.Threading -{ - // https://thomaslevesque.com/2015/06/04/async-and-cancellation-support-for-wait-handles/ - static class WaitHandleExtensions - { - internal static async Task WaitOneAsync( - this WaitHandle handle, int millisecondsTimeout, CancellationToken cancellationToken = default) - { - var tcs = new TaskCompletionSource(); - using var tokenRegistration = - cancellationToken.Register(state => ((TaskCompletionSource)state!).TrySetCanceled(), tcs); - - RegisteredWaitHandle? registeredHandle = null; - try - { - registeredHandle = ThreadPool.RegisterWaitForSingleObject( - handle, - (state, timedOut) => ((TaskCompletionSource)state!).TrySetResult(!timedOut), - state: tcs, - millisecondsTimeout, - executeOnlyOnce: true); - return await tcs.Task; - } - finally - { - registeredHandle?.Unregister(null); - } - } - - internal static Task WaitOneAsync(this WaitHandle handle, TimeSpan timeout, CancellationToken cancellationToken = default) - => handle.WaitOneAsync((int)timeout.TotalMilliseconds, cancellationToken); - - internal static Task WaitOneAsync(this WaitHandle handle, CancellationToken cancellationToken = default) - => handle.WaitOneAsync(Timeout.Infinite, cancellationToken); - } -} - -#endif diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 52b88fe036..3d569c9bc2 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using 
System.Text; @@ -7,7 +7,9 @@ namespace Npgsql; sealed class SqlQueryParser { - readonly Dictionary _paramIndexMap = new(); + static NpgsqlParameterCollection EmptyParameters { get; } = []; + + readonly Dictionary _paramIndexMap = new(StringComparer.OrdinalIgnoreCase); readonly StringBuilder _rewrittenSql = new(); /// @@ -70,7 +72,7 @@ void ParseRawQuery( // Batching mode. We're processing only one batch - if we encounter a semicolon (legacy batching), that's an error. Debug.Assert(batchCommand is not null); sql = batchCommand.CommandText; - parameters = batchCommand.Parameters; + parameters = batchCommand._parameters ?? EmptyParameters; batchCommands = null; } else @@ -78,7 +80,7 @@ void ParseRawQuery( // Command mode. Semicolons (legacy batching) may occur. Debug.Assert(batchCommand is null); sql = command.CommandText; - parameters = command.Parameters; + parameters = command._parameters ?? EmptyParameters; batchCommands = command.InternalBatchCommands; MoveToNextBatchCommand(); } @@ -207,7 +209,7 @@ void ParseRawQuery( } if (!parameter.IsInputDirection) - throw new Exception($"Parameter '{paramName}' referenced in SQL but is an out-only parameter"); + ThrowHelper.ThrowInvalidOperationException("Parameter '{0}' referenced in SQL but is an out-only parameter", paramName); batchCommand.PositionalParameters.Add(parameter); index = _paramIndexMap[paramName] = batchCommand.PositionalParameters.Count; @@ -466,9 +468,8 @@ void ParseRawQuery( if (command is null) { - throw new NotSupportedException( - $"Specifying multiple SQL statements in a single {nameof(NpgsqlBatchCommand)} isn't supported, " + - "please remove all semicolons."); + ThrowHelper.ThrowNotSupportedException($"Specifying multiple SQL statements in a single {nameof(NpgsqlBatchCommand)} isn't supported, " + + "please remove all semicolons."); } statementIndex++; @@ -485,7 +486,11 @@ void ParseRawQuery( Finish: _rewrittenSql.Append(sql, currTokenBeg, end - currTokenBeg); - batchCommand.FinalCommandText = 
_rewrittenSql.ToString(); + if (statementIndex is 0 && _paramIndexMap.Count is 0) + // Single statement, no parameters, no rewriting necessary + batchCommand.FinalCommandText = sql; + else + batchCommand.FinalCommandText = _rewrittenSql.ToString(); if (batchCommands is not null && batchCommands.Count > statementIndex + 1) batchCommands.RemoveRange(statementIndex + 1, batchCommands.Count - (statementIndex + 1)); @@ -496,10 +501,12 @@ void MoveToNextBatchCommand() { batchCommand = batchCommands[statementIndex]; batchCommand.Reset(); + batchCommand._parameters = parameters; } else { - batchCommand = new NpgsqlBatchCommand(); + batchCommand = new NpgsqlBatchCommand { _parameters = parameters }; + batchCommand.CommandText = sql; batchCommands.Add(batchCommand); } } diff --git a/src/Npgsql/TaskTimeoutAndCancellation.cs b/src/Npgsql/TaskTimeoutAndCancellation.cs deleted file mode 100644 index 359c4947b1..0000000000 --- a/src/Npgsql/TaskTimeoutAndCancellation.cs +++ /dev/null @@ -1,66 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Util; - -namespace Npgsql; - -/// -/// Utility class to execute a potentially non-cancellable while allowing to timeout and/or cancel awaiting for it and at the same time prevent event if the original fails later. -/// -static class TaskTimeoutAndCancellation -{ - /// - /// Executes a potentially non-cancellable while allowing to timeout and/or cancel awaiting for it. - /// If the given task does not complete within , a is thrown. - /// The executed may be left in an incomplete state after the that this method returns completes dues to timeout and/or cancellation request. - /// The method guarantees that the abandoned, incomplete is not going to produce event if it fails later. - /// - /// Gets the for execution with a combined that attempts to cancel the in an event of the timeout or external cancellation request. 
- /// The timeout after which the should be faulted with a if it hasn't otherwise completed. - /// The to monitor for a cancellation request. - /// The result . - /// The representing the asynchronous wait. - internal static async Task ExecuteAsync(Func> getTaskFunc, NpgsqlTimeout timeout, CancellationToken cancellationToken) - { - Task? task = default; - await ExecuteAsync(ct => (Task)(task = getTaskFunc(ct)), timeout, cancellationToken); - return await task!; - } - - /// - /// Executes a potentially non-cancellable while allowing to timeout and/or cancel awaiting for it. - /// If the given task does not complete within , a is thrown. - /// The executed may be left in an incomplete state after the that this method returns completes dues to timeout and/or cancellation request. - /// The method guarantees that the abandoned, incomplete is not going to produce event if it fails later. - /// - /// Gets the for execution with a combined that attempts to cancel the in an event of the timeout or external cancellation request. - /// The timeout after which the should be faulted with a if it hasn't otherwise completed. - /// The to monitor for a cancellation request. - /// The representing the asynchronous wait. - internal static async Task ExecuteAsync(Func getTaskFunc, NpgsqlTimeout timeout, CancellationToken cancellationToken) - { - using var combinedCts = timeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; - var task = getTaskFunc(combinedCts?.Token ?? cancellationToken); - try - { - try - { - await task.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken); - } - catch (TimeoutException) when (!task!.IsCompleted) - { - // Attempt to stop the Task in progress. - combinedCts?.Cancel(); - throw; - } - } - catch - { - // Prevent unobserved Task notifications by observing the failed Task exception. - // To test: comment the next line out and re-run TaskExtensionsTest.DelayedFaultedTaskCancellation. 
- _ = task.ContinueWith(t => _ = t.Exception, CancellationToken.None, TaskContinuationOptions.OnlyOnFaulted, TaskScheduler.Current); - throw; - } - } -} diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 1a05ab5e8b..53e8083df5 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -1,40 +1,110 @@ -using Npgsql.BackendMessages; +using Npgsql.BackendMessages; using System; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal; namespace Npgsql; static class ThrowHelper { [DoesNotReturn] - internal static void ThrowInvalidCastException_NotSupportedType(NpgsqlTypeHandler handler, NpgsqlParameter? parameter, Type type) - { - var parameterName = parameter is null - ? null - : parameter.TrimmedName == string.Empty - ? $"${parameter.Collection!.IndexOf(parameter) + 1}" - : parameter.TrimmedName; + internal static void ThrowArgumentOutOfRangeException() + => throw new ArgumentOutOfRangeException(); - throw new InvalidCastException(parameterName is null - ? $"Cannot write a value of CLR type '{type}' as database type '{handler.PgDisplayName}'." 
- : $"Cannot write a value of CLR type '{type}' as database type '{handler.PgDisplayName}' for parameter '{parameterName}'."); - } + [DoesNotReturn] + internal static void ThrowArgumentOutOfRangeException(string paramName, string message) + => throw new ArgumentOutOfRangeException(paramName, message); + + [DoesNotReturn] + internal static void ThrowArgumentOutOfRangeException(string paramName, string message, object argument) + => throw new ArgumentOutOfRangeException(paramName, string.Format(message, argument)); + + [DoesNotReturn] + internal static void ThrowUnreachableException(string message, object argument) + => throw new UnreachableException(string.Format(message, argument)); + + [DoesNotReturn] + internal static void ThrowInvalidOperationException() + => throw new InvalidOperationException(); + + [DoesNotReturn] + internal static void ThrowInvalidOperationException(string message) + => throw new InvalidOperationException(message); + + [DoesNotReturn] + internal static void ThrowInvalidOperationException(string message, object argument) + => throw new InvalidOperationException(string.Format(message, argument)); + + [DoesNotReturn] + internal static void ThrowObjectDisposedException(string? objectName) + => throw new ObjectDisposedException(objectName); + + [DoesNotReturn] + internal static void ThrowObjectDisposedException(string objectName, string message) + => throw new ObjectDisposedException(objectName, message); + + [DoesNotReturn] + internal static void ThrowObjectDisposedException(string objectName, Exception? 
innerException) + => throw new ObjectDisposedException(objectName, innerException); + + [DoesNotReturn] + internal static void ThrowInvalidCastException(string message, object argument) + => throw new InvalidCastException(string.Format(message, argument)); [DoesNotReturn] internal static void ThrowInvalidCastException_NoValue(FieldDescription field) => throw new InvalidCastException($"Column '{field.Name}' is null."); [DoesNotReturn] - internal static void ThrowInvalidOperationException_NoPropertyGetter(Type type, MemberInfo property) => - throw new InvalidOperationException($"Composite type '{type}' cannot be written because the '{property}' property has no getter."); + internal static void ThrowInvalidCastException(string message) => + throw new InvalidCastException(message); + + [DoesNotReturn] + internal static void ThrowInvalidCastException_NoValue() => + throw new InvalidCastException("Field is null."); + + [DoesNotReturn] + internal static void ThrowNpgsqlException(string message) + => throw new NpgsqlException(message); + + [DoesNotReturn] + internal static void ThrowNpgsqlException(string message, Exception? 
innerException) + => throw new NpgsqlException(message, innerException); + + [DoesNotReturn] + internal static void ThrowNpgsqlOperationInProgressException(NpgsqlCommand command) + => throw new NpgsqlOperationInProgressException(command); + + [DoesNotReturn] + internal static void ThrowNpgsqlOperationInProgressException(ConnectorState state) + => throw new NpgsqlOperationInProgressException(state); + + [DoesNotReturn] + internal static void ThrowArgumentException(string message) + => throw new ArgumentException(message); + + [DoesNotReturn] + internal static void ThrowArgumentException(string message, string paramName) + => throw new ArgumentException(message, paramName); + + [DoesNotReturn] + internal static void ThrowArgumentNullException(string message, string paramName) + => throw new ArgumentNullException(paramName, message); + + [DoesNotReturn] + internal static void ThrowIndexOutOfRangeException(string message) + => throw new IndexOutOfRangeException(message); + + [DoesNotReturn] + internal static void ThrowIndexOutOfRangeException(string message, int argument) + => throw new IndexOutOfRangeException(string.Format(message, argument)); [DoesNotReturn] - internal static void ThrowInvalidOperationException_NoPropertySetter(Type type, MemberInfo property) => - throw new InvalidOperationException($"Composite type '{type}' cannot be read because the '{property}' property has no setter."); + internal static void ThrowNotSupportedException(string? 
message = null) + => throw new NotSupportedException(message); [DoesNotReturn] - internal static void ThrowInvalidOperationException_BinaryImportParametersMismatch(int columnCount, int valueCount) => - throw new InvalidOperationException($"The binary import operation was started with {columnCount} column(s), but {valueCount} value(s) were provided."); -} \ No newline at end of file + internal static void ThrowNpgsqlExceptionWithInnerTimeoutException(string message) + => throw new NpgsqlException(message, new TimeoutException()); +} diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs deleted file mode 100644 index f7e132fb5d..0000000000 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ /dev/null @@ -1,755 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Collections.Immutable; -using System.Collections.Specialized; -using System.Data; -using System.IO; -using System.Net; -using System.Net.NetworkInformation; -using System.Numerics; -using System.Text.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; -using Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; -using Npgsql.Internal.TypeHandlers.GeometricHandlers; -using Npgsql.Internal.TypeHandlers.InternalTypeHandlers; -using Npgsql.Internal.TypeHandlers.LTreeHandlers; -using Npgsql.Internal.TypeHandlers.NetworkHandlers; -using Npgsql.Internal.TypeHandlers.NumericHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.TypeMapping; - -sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlConnector _connector; - readonly NpgsqlDatabaseInfo _databaseInfo; - - static readonly Type ReadOnlyIPAddressType = IPAddress.Loopback.GetType(); - - static readonly Dictionary Mappings = new() - { - // Numeric 
types - { "smallint", new(NpgsqlDbType.Smallint, "smallint", typeof(short), typeof(byte), typeof(sbyte)) }, - { "integer", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, - { "int", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, - { "bigint", new(NpgsqlDbType.Bigint, "bigint", typeof(long)) }, - { "real", new(NpgsqlDbType.Real, "real", typeof(float)) }, - { "double precision", new(NpgsqlDbType.Double, "double precision", typeof(double)) }, - { "numeric", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, - { "decimal", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, - { "money", new(NpgsqlDbType.Money, "money") }, - - // Text types - { "text", new(NpgsqlDbType.Text, "text", typeof(string), typeof(char[]), typeof(char), typeof(ArraySegment)) }, - { "xml", new(NpgsqlDbType.Xml, "xml") }, - { "character varying", new(NpgsqlDbType.Varchar, "character varying") }, - { "varchar", new(NpgsqlDbType.Varchar, "character varying") }, - { "character", new(NpgsqlDbType.Char, "character") }, - { "name", new(NpgsqlDbType.Name, "name") }, - { "refcursor", new(NpgsqlDbType.Refcursor, "refcursor") }, - { "citext", new(NpgsqlDbType.Citext, "citext") }, - { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument)) }, - { "json", new(NpgsqlDbType.Json, "json") }, - { "jsonpath", new(NpgsqlDbType.JsonPath, "jsonpath") }, - - // Date/time types - { "timestamp without time zone", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, - { "timestamp", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, - { "timestamp with time zone", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, - { "timestamptz", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, - { "date", new(NpgsqlDbType.Date, "date" -#if NET6_0_OR_GREATER - , typeof(DateOnly) -#endif - ) }, - { "time without time zone", 
new(NpgsqlDbType.Time, "time without time zone" -#if NET6_0_OR_GREATER - , typeof(TimeOnly) -#endif - ) }, - { "time", new(NpgsqlDbType.Time, "time without time zone" -#if NET6_0_OR_GREATER - , typeof(TimeOnly) -#endif - ) }, - { "time with time zone", new(NpgsqlDbType.TimeTz, "time with time zone") }, - { "timetz", new(NpgsqlDbType.TimeTz, "time with time zone") }, - { "interval", new(NpgsqlDbType.Interval, "interval", typeof(TimeSpan)) }, - - { "timestamp without time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.Timestamp, "timestamp without time zone[]") }, - { "timestamp with time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.TimestampTz, "timestamp with time zone[]") }, - - { "int4range", new(NpgsqlDbType.IntegerRange, "int4range") }, - { "int8range", new(NpgsqlDbType.BigIntRange, "int8range") }, - { "numrange", new(NpgsqlDbType.NumericRange, "numrange") }, - { "daterange", new(NpgsqlDbType.DateRange, "daterange") }, - { "tsrange", new(NpgsqlDbType.TimestampRange, "tsrange") }, - { "tstzrange", new(NpgsqlDbType.TimestampTzRange, "tstzrange") }, - - { "int4multirange", new(NpgsqlDbType.IntegerMultirange, "int4range") }, - { "int8multirange", new(NpgsqlDbType.BigIntMultirange, "int8range") }, - { "nummultirange", new(NpgsqlDbType.NumericMultirange, "numrange") }, - { "datemultirange", new(NpgsqlDbType.DateMultirange, "datemultirange") }, - { "tsmultirange", new(NpgsqlDbType.TimestampMultirange, "tsmultirange") }, - { "tstzmultirange", new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange") }, - - // Network types - { "cidr", new(NpgsqlDbType.Cidr, "cidr") }, -#pragma warning disable 618 - { "inet", new(NpgsqlDbType.Inet, "inet", typeof(IPAddress), typeof((IPAddress Address, int Subnet)), typeof(NpgsqlInet), ReadOnlyIPAddressType) }, -#pragma warning restore 618 - { "macaddr", new(NpgsqlDbType.MacAddr, "macaddr", typeof(PhysicalAddress)) }, - { "macaddr8", new(NpgsqlDbType.MacAddr8, "macaddr8") }, - - // Full-text search types - { "tsquery", 
new(NpgsqlDbType.TsQuery, "tsquery", - typeof(NpgsqlTsQuery), typeof(NpgsqlTsQueryAnd), typeof(NpgsqlTsQueryEmpty), typeof(NpgsqlTsQueryFollowedBy), - typeof(NpgsqlTsQueryLexeme), typeof(NpgsqlTsQueryNot), typeof(NpgsqlTsQueryOr), typeof(NpgsqlTsQueryBinOp) - ) }, - { "tsvector", new(NpgsqlDbType.TsVector, "tsvector", typeof(NpgsqlTsVector)) }, - - // Geometry types - { "box", new(NpgsqlDbType.Box, "box", typeof(NpgsqlBox)) }, - { "circle", new(NpgsqlDbType.Circle, "circle", typeof(NpgsqlCircle)) }, - { "line", new(NpgsqlDbType.Line, "line", typeof(NpgsqlLine)) }, - { "lseg", new(NpgsqlDbType.LSeg, "lseg", typeof(NpgsqlLSeg)) }, - { "path", new(NpgsqlDbType.Path, "path", typeof(NpgsqlPath)) }, - { "point", new(NpgsqlDbType.Point, "point", typeof(NpgsqlPoint)) }, - { "polygon", new(NpgsqlDbType.Polygon, "polygon", typeof(NpgsqlPolygon)) }, - - // LTree types - { "lquery", new(NpgsqlDbType.LQuery, "lquery") }, - { "ltree", new(NpgsqlDbType.LTree, "ltree") }, - { "ltxtquery", new(NpgsqlDbType.LTxtQuery, "ltxtquery") }, - - // UInt types - { "oid", new(NpgsqlDbType.Oid, "oid") }, - { "xid", new(NpgsqlDbType.Xid, "xid") }, - { "xid8", new(NpgsqlDbType.Xid8, "xid8") }, - { "cid", new(NpgsqlDbType.Cid, "cid") }, - { "regtype", new(NpgsqlDbType.Regtype, "regtype") }, - { "regconfig", new(NpgsqlDbType.Regconfig, "regconfig") }, - - // Misc types - { "boolean", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, - { "bool", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, - { "bytea", new(NpgsqlDbType.Bytea, "bytea", typeof(byte[]), typeof(ArraySegment) -#if !NETSTANDARD2_0 - , typeof(ReadOnlyMemory), typeof(Memory) -#endif - ) }, - { "uuid", new(NpgsqlDbType.Uuid, "uuid", typeof(Guid)) }, - { "bit varying", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, - { "varbit", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, - { "bit", new(NpgsqlDbType.Bit, "bit") }, - { "hstore", new(NpgsqlDbType.Hstore, 
"hstore", typeof(Dictionary), typeof(IDictionary) -#if !NETSTANDARD2_0 && !NETSTANDARD2_1 - , typeof(ImmutableDictionary) -#endif - ) }, - - // Internal types - { "int2vector", new(NpgsqlDbType.Int2Vector, "int2vector") }, - { "oidvector", new(NpgsqlDbType.Oidvector, "oidvector") }, - { "pg_lsn", new(NpgsqlDbType.PgLsn, "pg_lsn", typeof(NpgsqlLogSequenceNumber)) }, - { "tid", new(NpgsqlDbType.Tid, "tid", typeof(NpgsqlTid)) }, - { "char", new(NpgsqlDbType.InternalChar, "char") }, - - // Special types - { "unknown", new(NpgsqlDbType.Unknown, "unknown") }, - }; - - #region Cached handlers - - // Numeric types - readonly Int16Handler _int16Handler; - readonly Int32Handler _int32Handler; - readonly Int64Handler _int64Handler; - SingleHandler? _singleHandler; - readonly DoubleHandler _doubleHandler; - readonly NumericHandler _numericHandler; - MoneyHandler? _moneyHandler; - - // Text types - readonly TextHandler _textHandler; - TextHandler? _xmlHandler; - TextHandler? _varcharHandler; - TextHandler? _charHandler; - TextHandler? _nameHandler; - TextHandler? _refcursorHandler; - TextHandler? _citextHandler; - JsonHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb - JsonHandler? _jsonHandler; - JsonPathHandler? _jsonPathHandler; - - // Date/time types - readonly TimestampHandler _timestampHandler; - readonly TimestampTzHandler _timestampTzHandler; - readonly DateHandler _dateHandler; - TimeHandler? _timeHandler; - TimeTzHandler? _timeTzHandler; - IntervalHandler? _intervalHandler; - - // Network types - CidrHandler? _cidrHandler; - InetHandler? _inetHandler; - MacaddrHandler? _macaddrHandler; - MacaddrHandler? _macaddr8Handler; - - // Full-text search types - TsQueryHandler? _tsQueryHandler; - TsVectorHandler? _tsVectorHandler; - - // Geometry types - BoxHandler? _boxHandler; - CircleHandler? _circleHandler; - LineHandler? _lineHandler; - LineSegmentHandler? _lineSegmentHandler; - PathHandler? _pathHandler; - PointHandler? 
_pointHandler; - PolygonHandler? _polygonHandler; - - // LTree types - LQueryHandler? _lQueryHandler; - LTreeHandler? _lTreeHandler; - LTxtQueryHandler? _lTxtQueryHandler; - - // UInt types - UInt32Handler? _oidHandler; - UInt32Handler? _xidHandler; - UInt64Handler? _xid8Handler; - UInt32Handler? _cidHandler; - UInt32Handler? _regtypeHandler; - UInt32Handler? _regconfigHandler; - - // Misc types - readonly BoolHandler _boolHandler; - ByteaHandler? _byteaHandler; - UuidHandler? _uuidHandler; - BitStringHandler? _bitVaryingHandler; - BitStringHandler? _bitHandler; - RecordHandler? _recordHandler; - VoidHandler? _voidHandler; - HstoreHandler? _hstoreHandler; - - // Internal types - Int2VectorHandler? _int2VectorHandler; - OIDVectorHandler? _oidVectorHandler; - PgLsnHandler? _pgLsnHandler; - TidHandler? _tidHandler; - InternalCharHandler? _internalCharHandler; - - // Special types - UnknownTypeHandler? _unknownHandler; - - // Complex type handlers over timestamp/timestamptz (because DateTime is value-dependent) - NpgsqlTypeHandler? _timestampArrayHandler; - NpgsqlTypeHandler? _timestampTzArrayHandler; - NpgsqlTypeHandler? _timestampRangeHandler; - NpgsqlTypeHandler? _timestampTzRangeHandler; - NpgsqlTypeHandler? _timestampMultirangeHandler; - NpgsqlTypeHandler? 
_timestampTzMultirangeHandler; - - #endregion Cached handlers - - internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) - { - _connector = connector; - _databaseInfo = connector.DatabaseInfo; - - // Eagerly instantiate some handlers for very common types so we don't need to check later - _int16Handler = new Int16Handler(PgType("smallint")); - _int32Handler = new Int32Handler(PgType("integer")); - _int64Handler = new Int64Handler(PgType("bigint")); - _doubleHandler = new DoubleHandler(PgType("double precision")); - _numericHandler = new NumericHandler(PgType("numeric")); - _textHandler ??= new TextHandler(PgType("text"), _connector.TextEncoding); - _timestampHandler ??= new TimestampHandler(PgType("timestamp without time zone")); - _timestampTzHandler ??= new TimestampTzHandler(PgType("timestamp with time zone")); - _dateHandler ??= new DateHandler(PgType("date")); - _boolHandler ??= new BoolHandler(PgType("boolean")); - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - // Numeric types - "smallint" => _int16Handler, - "integer" or "int" => _int32Handler, - "bigint" => _int64Handler, - "real" => SingleHandler(), - "double precision" => _doubleHandler, - "numeric" or "decimal" => _numericHandler, - "money" => MoneyHandler(), - - // Text types - "text" => _textHandler, - "xml" => XmlHandler(), - "varchar" or "character varying" => VarcharHandler(), - "character" => CharHandler(), - "name" => NameHandler(), - "refcursor" => RefcursorHandler(), - "citext" => CitextHandler(), - "jsonb" => JsonbHandler(), - "json" => JsonHandler(), - "jsonpath" => JsonPathHandler(), - - // Date/time types - "timestamp" or "timestamp without time zone" => _timestampHandler, - "timestamptz" or "timestamp with time zone" => _timestampTzHandler, - "date" => _dateHandler, - "time without time zone" => TimeHandler(), - "time with time zone" => TimeTzHandler(), - "interval" => IntervalHandler(), - - // Network types - "cidr" => 
CidrHandler(), - "inet" => InetHandler(), - "macaddr" => MacaddrHandler(), - "macaddr8" => Macaddr8Handler(), - - // Full-text search types - "tsquery" => TsQueryHandler(), - "tsvector" => TsVectorHandler(), - - // Geometry types - "box" => BoxHandler(), - "circle" => CircleHandler(), - "line" => LineHandler(), - "lseg" => LineSegmentHandler(), - "path" => PathHandler(), - "point" => PointHandler(), - "polygon" => PolygonHandler(), - - // LTree types - "lquery" => LQueryHandler(), - "ltree" => LTreeHandler(), - "ltxtquery" => LTxtHandler(), - - // UInt types - "oid" => OidHandler(), - "xid" => XidHandler(), - "xid8" => Xid8Handler(), - "cid" => CidHandler(), - "regtype" => RegtypeHandler(), - "regconfig" => RegconfigHandler(), - - // Misc types - "bool" or "boolean" => _boolHandler, - "bytea" => ByteaHandler(), - "uuid" => UuidHandler(), - "bit varying" or "varbit" => BitVaryingHandler(), - "bit" => BitHandler(), - "hstore" => HstoreHandler(), - - // Internal types - "int2vector" => Int2VectorHandler(), - "oidvector" => OidVectorHandler(), - "pg_lsn" => PgLsnHandler(), - "tid" => TidHandler(), - "char" => InternalCharHandler(), - "record" => RecordHandler(), - "void" => VoidHandler(), - - "unknown" => UnknownHandler(), - - _ => null - }; - - public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) - { - if (!ClrTypeToDataTypeNameTable.TryGetValue(type, out var dataTypeName)) - { - if (!type.IsSubclassOf(typeof(Stream))) - return null; - - dataTypeName = "bytea"; - } - - return ResolveByDataTypeName(dataTypeName); - } - - static readonly Dictionary ClrTypeToDataTypeNameTable; - - static BuiltInTypeHandlerResolver() - { - ClrTypeToDataTypeNameTable = new() - { - // Numeric types - { typeof(byte), "smallint" }, - { typeof(short), "smallint" }, - { typeof(int), "integer" }, - { typeof(long), "bigint" }, - { typeof(float), "real" }, - { typeof(double), "double precision" }, - { typeof(decimal), "decimal" }, - { typeof(BigInteger), "decimal" }, - - // Text types - { typeof(string), "text" }, - { typeof(char[]), "text" }, - { typeof(char), "text" }, - { typeof(ArraySegment), "text" }, - { typeof(JsonDocument), "jsonb" }, - - // Date/time types - // The DateTime entry is for LegacyTimestampBehavior mode only. In regular mode we resolve through - // ResolveValueDependentValue below - { typeof(DateTime), "timestamp without time zone" }, - { typeof(DateTimeOffset), "timestamp with time zone" }, -#if NET6_0_OR_GREATER - { typeof(DateOnly), "date" }, - { typeof(TimeOnly), "time without time zone" }, -#endif - { typeof(TimeSpan), "interval" }, - { typeof(NpgsqlInterval), "interval" }, - - // Network types - { typeof(IPAddress), "inet" }, - // See ReadOnlyIPAddress below - { typeof((IPAddress Address, int Subnet)), "inet" }, -#pragma warning disable 618 - { typeof(NpgsqlInet), "inet" }, -#pragma warning restore 618 - { typeof(PhysicalAddress), "macaddr" }, - - // Full-text types - { typeof(NpgsqlTsVector), "tsvector" }, - { typeof(NpgsqlTsQueryLexeme), "tsquery" }, - { typeof(NpgsqlTsQueryAnd), "tsquery" }, - { typeof(NpgsqlTsQueryOr), "tsquery" }, - { typeof(NpgsqlTsQueryNot), "tsquery" }, - { typeof(NpgsqlTsQueryEmpty), "tsquery" }, - { typeof(NpgsqlTsQueryFollowedBy), "tsquery" }, - - // Geometry types - { typeof(NpgsqlBox), "box" }, - { 
typeof(NpgsqlCircle), "circle" }, - { typeof(NpgsqlLine), "line" }, - { typeof(NpgsqlLSeg), "lseg" }, - { typeof(NpgsqlPath), "path" }, - { typeof(NpgsqlPoint), "point" }, - { typeof(NpgsqlPolygon), "polygon" }, - - // Misc types - { typeof(bool), "boolean" }, - { typeof(byte[]), "bytea" }, - { typeof(ArraySegment), "bytea" }, -#if !NETSTANDARD2_0 - { typeof(ReadOnlyMemory), "bytea" }, - { typeof(Memory), "bytea" }, -#endif - { typeof(Guid), "uuid" }, - { typeof(BitArray), "bit varying" }, - { typeof(BitVector32), "bit varying" }, - { typeof(Dictionary), "hstore" }, -#if !NETSTANDARD2_0 && !NETSTANDARD2_1 - { typeof(ImmutableDictionary), "hstore" }, -#endif - - // Internal types - { typeof(NpgsqlLogSequenceNumber), "pg_lsn" }, - { typeof(NpgsqlTid), "tid" }, - { typeof(DBNull), "unknown" }, - - // Built-in range types - { typeof(NpgsqlRange), "int4range" }, - { typeof(NpgsqlRange), "int8range" }, - { typeof(NpgsqlRange), "numrange" }, -#if NET6_0_OR_GREATER - { typeof(NpgsqlRange), "daterange" }, -#endif - - // Built-in multirange types - { typeof(NpgsqlRange[]), "int4multirange" }, - { typeof(List>), "int4multirange" }, - { typeof(NpgsqlRange[]), "int8multirange" }, - { typeof(List>), "int8multirange" }, - { typeof(NpgsqlRange[]), "nummultirange" }, - { typeof(List>), "nummultirange" }, -#if NET6_0_OR_GREATER - { typeof(NpgsqlRange[]), "datemultirange" }, - { typeof(List>), "datemultirange" }, -#endif - }; - - // Recent versions of .NET Core have an internal ReadOnlyIPAddress type (returned e.g. for IPAddress.Loopback) - // But older versions don't have it - if (ReadOnlyIPAddressType != typeof(IPAddress)) - ClrTypeToDataTypeNameTable[ReadOnlyIPAddressType] = "inet"; - - if (LegacyTimestampBehavior) - ClrTypeToDataTypeNameTable[typeof(DateTime)] = "timestamp without time zone"; - } - - public override NpgsqlTypeHandler? 
ResolveValueDependentValue(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - DateTime dateTime => dateTime.Kind == DateTimeKind.Utc ? _timestampTzHandler : _timestampHandler, - - // For arrays/lists, return timestamp or timestamptz based on the kind of the first DateTime; if the user attempts to - // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. - IList array => ArrayHandler(array.Count == 0 ? DateTimeKind.Unspecified : array[0].Kind), - - NpgsqlRange range => RangeHandler(!range.LowerBoundInfinite ? range.LowerBound.Kind : - !range.UpperBoundInfinite ? range.UpperBound.Kind : DateTimeKind.Unspecified), - - NpgsqlRange[] multirange => MultirangeHandler(GetMultirangeKind(multirange)), - List> multirange => MultirangeHandler(GetMultirangeKind(multirange)), - - _ => null - }; - - NpgsqlTypeHandler ArrayHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? _timestampTzArrayHandler ??= _timestampTzHandler.CreateArrayHandler( - (PostgresArrayType)PgType("timestamp with time zone[]"), _connector.Settings.ArrayNullabilityMode) - : _timestampArrayHandler ??= _timestampHandler.CreateArrayHandler( - (PostgresArrayType)PgType("timestamp without time zone[]"), _connector.Settings.ArrayNullabilityMode); - - NpgsqlTypeHandler RangeHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? _timestampTzRangeHandler ??= _timestampTzHandler.CreateRangeHandler((PostgresRangeType)PgType("tstzrange")) - : _timestampRangeHandler ??= _timestampHandler.CreateRangeHandler((PostgresRangeType)PgType("tsrange")); - - NpgsqlTypeHandler MultirangeHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? 
_timestampTzMultirangeHandler ??= _timestampTzHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tstzmultirange")) - : _timestampMultirangeHandler ??= _timestampHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tsmultirange")); - } - - static DateTimeKind GetRangeKind(NpgsqlRange range) - => !range.LowerBoundInfinite - ? range.LowerBound.Kind - : !range.UpperBoundInfinite - ? range.UpperBound.Kind - : DateTimeKind.Unspecified; - - static DateTimeKind GetMultirangeKind(IList> multirange) - { - for (var i = 0; i < multirange.Count; i++) - if (!multirange[i].IsEmpty) - return GetRangeKind(multirange[i]); - - return DateTimeKind.Unspecified; - } - - internal static string? ValueDependentValueToDataTypeName(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - DateTime dateTime => dateTime.Kind == DateTimeKind.Utc ? "timestamp with time zone" : "timestamp without time zone", - - // For arrays/lists, return timestamp or timestamptz based on the kind of the first DateTime; if the user attempts to - // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. - IList array => array.Count == 0 - ? "timestamp without time zone[]" - : array[0].Kind == DateTimeKind.Utc ? "timestamp with time zone[]" : "timestamp without time zone[]", - - NpgsqlRange range => GetRangeKind(range) == DateTimeKind.Utc ? "tstzrange" : "tsrange", - - NpgsqlRange[] multirange => GetMultirangeKind(multirange) == DateTimeKind.Utc ? "tstzmultirange" : "tsmultirange", - - _ => null - }; - } - - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) - { - // This method only ever gets called for value types, and relies on the JIT specializing the method for T by eliding all the - // type checks below. 
- - // Numeric types - if (typeof(T) == typeof(byte)) - return _int16Handler; - if (typeof(T) == typeof(short)) - return _int16Handler; - if (typeof(T) == typeof(int)) - return _int32Handler; - if (typeof(T) == typeof(long)) - return _int64Handler; - if (typeof(T) == typeof(float)) - return SingleHandler(); - if (typeof(T) == typeof(double)) - return _doubleHandler; - if (typeof(T) == typeof(decimal)) - return _numericHandler; - if (typeof(T) == typeof(BigInteger)) - return _numericHandler; - - // Text types - if (typeof(T) == typeof(char)) - return _textHandler; - if (typeof(T) == typeof(ArraySegment)) - return _textHandler; - if (typeof(T) == typeof(JsonDocument)) - return JsonbHandler(); - - // Date/time types - // No resolution for DateTime, since that's value-dependent (Kind) - if (typeof(T) == typeof(DateTimeOffset)) - return _timestampTzHandler; -#if NET6_0_OR_GREATER - if (typeof(T) == typeof(DateOnly)) - return _dateHandler; - if (typeof(T) == typeof(TimeOnly)) - return _timeHandler; -#endif - if (typeof(T) == typeof(TimeSpan)) - return _intervalHandler; - if (typeof(T) == typeof(NpgsqlInterval)) - return _intervalHandler; - - // Network types - if (typeof(T) == typeof(IPAddress)) - return InetHandler(); - if (typeof(T) == typeof(PhysicalAddress)) - return _macaddrHandler; - if (typeof(T) == typeof(TimeSpan)) - return _intervalHandler; - - // Geometry types - if (typeof(T) == typeof(NpgsqlBox)) - return BoxHandler(); - if (typeof(T) == typeof(NpgsqlCircle)) - return CircleHandler(); - if (typeof(T) == typeof(NpgsqlLine)) - return LineHandler(); - if (typeof(T) == typeof(NpgsqlLSeg)) - return LineSegmentHandler(); - if (typeof(T) == typeof(NpgsqlPath)) - return PathHandler(); - if (typeof(T) == typeof(NpgsqlPoint)) - return PointHandler(); - if (typeof(T) == typeof(NpgsqlPolygon)) - return PolygonHandler(); - - // Misc types - if (typeof(T) == typeof(bool)) - return _boolHandler; - if (typeof(T) == typeof(Guid)) - return UuidHandler(); - if (typeof(T) == 
typeof(BitVector32)) - return BitVaryingHandler(); - - // Internal types - if (typeof(T) == typeof(NpgsqlLogSequenceNumber)) - return PgLsnHandler(); - if (typeof(T) == typeof(NpgsqlTid)) - return TidHandler(); - if (typeof(T) == typeof(DBNull)) - return UnknownHandler(); - - return null; - } - - internal static string? ClrTypeToDataTypeName(Type type) - => ClrTypeToDataTypeNameTable.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); - - #region Handler accessors - - // Numeric types - NpgsqlTypeHandler SingleHandler() => _singleHandler ??= new SingleHandler(PgType("real")); - NpgsqlTypeHandler MoneyHandler() => _moneyHandler ??= new MoneyHandler(PgType("money")); - - // Text types - NpgsqlTypeHandler XmlHandler() => _xmlHandler ??= new TextHandler(PgType("xml"), _connector.TextEncoding); - NpgsqlTypeHandler VarcharHandler() => _varcharHandler ??= new TextHandler(PgType("character varying"), _connector.TextEncoding); - NpgsqlTypeHandler CharHandler() => _charHandler ??= new TextHandler(PgType("character"), _connector.TextEncoding); - NpgsqlTypeHandler NameHandler() => _nameHandler ??= new TextHandler(PgType("name"), _connector.TextEncoding); - NpgsqlTypeHandler RefcursorHandler() => _refcursorHandler ??= new TextHandler(PgType("refcursor"), _connector.TextEncoding); - NpgsqlTypeHandler? CitextHandler() => _citextHandler ??= _databaseInfo.TryGetPostgresTypeByName("citext", out var pgType) - ? 
new TextHandler(pgType, _connector.TextEncoding) - : null; - NpgsqlTypeHandler JsonbHandler() => _jsonbHandler ??= new JsonHandler(PgType("jsonb"), _connector.TextEncoding, isJsonb: true); - NpgsqlTypeHandler JsonHandler() => _jsonHandler ??= new JsonHandler(PgType("json"), _connector.TextEncoding, isJsonb: false); - NpgsqlTypeHandler JsonPathHandler() => _jsonPathHandler ??= new JsonPathHandler(PgType("jsonpath"), _connector.TextEncoding); - - // Date/time types - NpgsqlTypeHandler TimeHandler() => _timeHandler ??= new TimeHandler(PgType("time without time zone")); - NpgsqlTypeHandler TimeTzHandler() => _timeTzHandler ??= new TimeTzHandler(PgType("time with time zone")); - NpgsqlTypeHandler IntervalHandler() => _intervalHandler ??= new IntervalHandler(PgType("interval")); - - // Network types - NpgsqlTypeHandler CidrHandler() => _cidrHandler ??= new CidrHandler(PgType("cidr")); - NpgsqlTypeHandler InetHandler() => _inetHandler ??= new InetHandler(PgType("inet")); - NpgsqlTypeHandler MacaddrHandler() => _macaddrHandler ??= new MacaddrHandler(PgType("macaddr")); - NpgsqlTypeHandler Macaddr8Handler() => _macaddr8Handler ??= new MacaddrHandler(PgType("macaddr8")); - - // Full-text search types - NpgsqlTypeHandler TsQueryHandler() => _tsQueryHandler ??= new TsQueryHandler(PgType("tsquery")); - NpgsqlTypeHandler TsVectorHandler() => _tsVectorHandler ??= new TsVectorHandler(PgType("tsvector")); - - // Geometry types - NpgsqlTypeHandler BoxHandler() => _boxHandler ??= new BoxHandler(PgType("box")); - NpgsqlTypeHandler CircleHandler() => _circleHandler ??= new CircleHandler(PgType("circle")); - NpgsqlTypeHandler LineHandler() => _lineHandler ??= new LineHandler(PgType("line")); - NpgsqlTypeHandler LineSegmentHandler() => _lineSegmentHandler ??= new LineSegmentHandler(PgType("lseg")); - NpgsqlTypeHandler PathHandler() => _pathHandler ??= new PathHandler(PgType("path")); - NpgsqlTypeHandler PointHandler() => _pointHandler ??= new PointHandler(PgType("point")); - 
NpgsqlTypeHandler PolygonHandler() => _polygonHandler ??= new PolygonHandler(PgType("polygon")); - - // LTree types - NpgsqlTypeHandler? LQueryHandler() => _lQueryHandler ??= _databaseInfo.TryGetPostgresTypeByName("lquery", out var pgType) - ? new LQueryHandler(pgType, _connector.TextEncoding) - : null; - NpgsqlTypeHandler? LTreeHandler() => _lTreeHandler ??= _databaseInfo.TryGetPostgresTypeByName("ltree", out var pgType) - ? new LTreeHandler(pgType, _connector.TextEncoding) - : null; - NpgsqlTypeHandler? LTxtHandler() => _lTxtQueryHandler ??= _databaseInfo.TryGetPostgresTypeByName("ltxtquery", out var pgType) - ? new LTxtQueryHandler(pgType, _connector.TextEncoding) - : null; - - // UInt types - NpgsqlTypeHandler OidHandler() => _oidHandler ??= new UInt32Handler(PgType("oid")); - NpgsqlTypeHandler XidHandler() => _xidHandler ??= new UInt32Handler(PgType("xid")); - NpgsqlTypeHandler Xid8Handler() => _xid8Handler ??= new UInt64Handler(PgType("xid8")); - NpgsqlTypeHandler CidHandler() => _cidHandler ??= new UInt32Handler(PgType("cid")); - NpgsqlTypeHandler RegtypeHandler() => _regtypeHandler ??= new UInt32Handler(PgType("regtype")); - NpgsqlTypeHandler RegconfigHandler() => _regconfigHandler ??= new UInt32Handler(PgType("regconfig")); - - // Misc types - NpgsqlTypeHandler ByteaHandler() => _byteaHandler ??= new ByteaHandler(PgType("bytea")); - NpgsqlTypeHandler UuidHandler() => _uuidHandler ??= new UuidHandler(PgType("uuid")); - NpgsqlTypeHandler BitVaryingHandler() => _bitVaryingHandler ??= new BitStringHandler(PgType("bit varying")); - NpgsqlTypeHandler BitHandler() => _bitHandler ??= new BitStringHandler(PgType("bit")); - NpgsqlTypeHandler? HstoreHandler() => _hstoreHandler ??= _databaseInfo.TryGetPostgresTypeByName("hstore", out var pgType) - ? 
new HstoreHandler(pgType, _textHandler) - : null; - - // Internal types - NpgsqlTypeHandler Int2VectorHandler() => _int2VectorHandler ??= new Int2VectorHandler(PgType("int2vector"), PgType("smallint")); - NpgsqlTypeHandler OidVectorHandler() => _oidVectorHandler ??= new OIDVectorHandler(PgType("oidvector"), PgType("oid")); - NpgsqlTypeHandler PgLsnHandler() => _pgLsnHandler ??= new PgLsnHandler(PgType("pg_lsn")); - NpgsqlTypeHandler TidHandler() => _tidHandler ??= new TidHandler(PgType("tid")); - NpgsqlTypeHandler InternalCharHandler() => _internalCharHandler ??= new InternalCharHandler(PgType("char")); - NpgsqlTypeHandler RecordHandler() => _recordHandler ??= new RecordHandler(PgType("record"), _connector.TypeMapper); - NpgsqlTypeHandler VoidHandler() => _voidHandler ??= new VoidHandler(PgType("void")); - - NpgsqlTypeHandler UnknownHandler() => _unknownHandler ??= new UnknownTypeHandler(_connector.TextEncoding); - - #endregion Handler accessors -} diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs deleted file mode 100644 index 6902ef5aaa..0000000000 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,20 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; - -namespace Npgsql.TypeMapping; - -sealed class BuiltInTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(NpgsqlConnector connector) - => new BuiltInTypeHandlerResolver(connector); - - public override string? GetDataTypeNameByClrType(Type clrType) - => BuiltInTypeHandlerResolver.ClrTypeToDataTypeName(clrType); - - public override string? GetDataTypeNameByValueDependentValue(object value) - => BuiltInTypeHandlerResolver.ValueDependentValueToDataTypeName(value); - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => BuiltInTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 9e65675a97..6e28fd158f 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -1,596 +1,291 @@ -using System; -using System.Collections.Concurrent; +using System; using System.Collections.Generic; -using System.Data; using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; -using System.Threading; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.NameTranslation; -using NpgsqlTypes; -using static Npgsql.Util.Statics; +using System.Text.Json; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using Npgsql.Internal.ResolverFactories; namespace Npgsql.TypeMapping; +/// sealed class GlobalTypeMapper : INpgsqlTypeMapper { - public static GlobalTypeMapper Instance { get; } + readonly UserTypeMapper _userTypeMapper = new(); + readonly List _pluginResolverFactories = []; + readonly object _sync = new(); + PgTypeInfoResolverFactory[] _typeMappingResolvers = []; - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = new NpgsqlSnakeCaseNameTranslator(); - - internal List ResolverFactories { get; } = new(); - public ConcurrentDictionary UserTypeMappings { get; } = new(); - - readonly ConcurrentDictionary _mappingsByClrType = new(); - - internal ReaderWriterLockSlim Lock { get; } - = new(LockRecursionPolicy.SupportsRecursion); - - static GlobalTypeMapper() - => Instance = new GlobalTypeMapper(); - - GlobalTypeMapper() - => Reset(); - - #region Mapping management - - public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) - where TEnum : struct, Enum + internal IEnumerable GetPluginResolverFactories() { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - Lock.EnterWriteLock(); - try - { - UserTypeMappings[pgName] = new UserEnumTypeMapping(pgName, nameTranslator); - RecordChange(); - return this; - } - finally - { - Lock.ExitWriteLock(); - } + lock (_sync) + return new List(_pluginResolverFactories); } - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - where TEnum : struct, Enum + internal PgTypeInfoResolverFactory? GetUserMappingsResolverFactory() { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); + lock (_sync) + return _userTypeMapper.Items.Count > 0 ? _userTypeMapper : null; + } - Lock.EnterWriteLock(); - try + internal void AddGlobalTypeMappingResolvers(PgTypeInfoResolverFactory[] factories, Func? builderFactory = null, bool overwrite = false) + { + lock (_sync) { - if (UserTypeMappings.TryRemove(pgName, out _)) + // Good enough logic to prevent SlimBuilder overriding the normal Builder. + if (overwrite || factories.Length > _typeMappingResolvers.Length) { - RecordChange(); - return true; + _builderFactory = builderFactory; + _typeMappingResolvers = factories; + _typeMappingOptions = null; } - - return false; - } - finally - { - Lock.ExitWriteLock(); } } - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); + PgSerializerOptions? _typeMappingOptions; + Func? _builderFactory; + JsonSerializerOptions? _jsonSerializerOptions; - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(T), nameTranslator); + PgSerializerOptions TypeMappingOptions => _typeMappingOptions ?? BuildTypeMappingOptions(); - Lock.EnterWriteLock(); - try - { - UserTypeMappings[pgName] = new UserCompositeTypeMapping(pgName, nameTranslator); - RecordChange(); - return this; - } - finally + PgSerializerOptions BuildTypeMappingOptions() + { + lock (_sync) { - Lock.ExitWriteLock(); + if (_typeMappingOptions is { } existing) + return existing; + + var builder = _builderFactory?.Invoke() ?? new(); + builder.AppendResolverFactory(_userTypeMapper); + foreach (var factory in _pluginResolverFactories) + builder.AppendResolverFactory(factory); + foreach (var factory in _typeMappingResolvers) + builder.AppendResolverFactory(factory); + var chain = builder.Build(); + var options = new PgSerializerOptions(PostgresMinimalDatabaseInfo.DefaultTypeCatalog, chain) + { + // This means we don't ever have a missing oid for a datatypename as our canonical format is datatypenames. + PortableTypeIds = true, + // Don't throw if our catalog doesn't know the datatypename. + IntrospectionMode = true + }; + _typeMappingOptions = options; + return options; } } - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + internal DataTypeName? FindDataTypeName(Type type, object? 
value) { - var openMethod = typeof(GlobalTypeMapper).GetMethod(nameof(MapComposite), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; - var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); - - return this; - } - - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - => UnmapComposite(typeof(T), pgName, nameTranslator); - - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(clrType, nameTranslator); - - Lock.EnterWriteLock(); + DataTypeName? dataTypeName; try { - if (UserTypeMappings.TryRemove(pgName, out _)) + var typeInfo = TypeMappingOptions.GetTypeInfoInternal(type, null); + if (typeInfo is PgProviderTypeInfo providerInfo) { - RecordChange(); - return true; + var concreteTypeInfo = providerInfo.MakeConcreteForValueAsObject(value is DBNull ? 
null : value, out var state); + if (state is not null) + concreteTypeInfo.DisposeWriteState(state); + dataTypeName = concreteTypeInfo.PgTypeId.DataTypeName; + } + else + { + dataTypeName = ((PgConcreteTypeInfo?)typeInfo)?.PgTypeId.DataTypeName; } - - return false; } - finally + catch { - Lock.ExitWriteLock(); + dataTypeName = null; } + return dataTypeName; } - public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) + internal static GlobalTypeMapper Instance { get; } + + static GlobalTypeMapper() + => Instance = new GlobalTypeMapper(); + + /// + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) { - Lock.EnterWriteLock(); - try + lock (_sync) { - // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapped.UseNodaTime, - // we replace an existing resolver of the same CLR type. - var type = resolverFactory.GetType(); + var type = factory.GetType(); - if (ResolverFactories[0].GetType() == type) - ResolverFactories[0] = resolverFactory; - else + // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapper.UseNodaTime, + // we replace an existing resolver of the same CLR type. 
+ if (_pluginResolverFactories.Count > 0 && _pluginResolverFactories[0].GetType() == type) + _pluginResolverFactories[0] = factory; + for (var i = 0; i < _pluginResolverFactories.Count; i++) { - for (var i = 0; i < ResolverFactories.Count; i++) - if (ResolverFactories[i].GetType() == type) - ResolverFactories.RemoveAt(i); - - ResolverFactories.Insert(0, resolverFactory); + if (_pluginResolverFactories[i].GetType() == type) + { + _pluginResolverFactories.RemoveAt(i); + break; + } } - RecordChange(); - } - finally - { - Lock.ExitWriteLock(); + _pluginResolverFactories.Insert(0, factory); + _typeMappingOptions = null; } } + public void AddDbTypeResolverFactory(DbTypeResolverFactory factory) + => throw new NotSupportedException("The global type mapper does not support DbTypeResolverFactories. Call this method on a data source builder instead."); + + /// public void Reset() { - Lock.EnterWriteLock(); - try - { - ResolverFactories.Clear(); - ResolverFactories.Add(new BuiltInTypeHandlerResolverFactory()); - - UserTypeMappings.Clear(); - - RecordChange(); - } - finally + lock (_sync) { - Lock.ExitWriteLock(); + _pluginResolverFactories.Clear(); + _userTypeMapper.Items.Clear(); + _typeMappingOptions = null; } } - internal void RecordChange() - => _mappingsByClrType.Clear(); - - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); - - #endregion Mapping management - - #region NpgsqlDbType/DbType inference for NpgsqlParameter - - [RequiresUnreferencedCode("ToNpgsqlDbType uses interface-based reflection and isn't trimming-safe")] - internal bool TryResolveMappingByValue(object value, [NotNullWhen(true)] out TypeMappingInfo? typeMapping) + /// + public INpgsqlNameTranslator DefaultNameTranslator { - Lock.EnterReadLock(); - try - { - // We resolve as follows: - // 1. Cached by-type lookup (fast path). 
This will work for almost all types after the very first resolution. - // 2. Value-dependent type lookup (e.g. DateTime by Kind) via the resolvers. This includes complex types (e.g. array/range - // over DateTime), and the results cannot be cached. - // 3. Uncached by-type lookup (for the very first resolution of a given type) - - var type = value.GetType(); - if (_mappingsByClrType.TryGetValue(type, out typeMapping)) - return true; - - foreach (var resolverFactory in ResolverFactories) - if ((typeMapping = resolverFactory.GetMappingByValueDependentValue(value)) is not null) - return true; + get => _userTypeMapper.DefaultNameTranslator; + set => _userTypeMapper.DefaultNameTranslator = value; + } - return TryResolveMappingByClrType(value.GetType(), out typeMapping); - } - finally + /// + public INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + { + lock (_sync) { - Lock.ExitReadLock(); - } + _jsonSerializerOptions = serializerOptions; - bool TryResolveMappingByClrType(Type clrType, [NotNullWhen(true)] out TypeMappingInfo? typeMapping) - { - if (_mappingsByClrType.TryGetValue(clrType, out typeMapping)) - return true; + // If JsonTypeInfoResolverFactory exists we replace it with a configured instance on the same index of the array. 
+ var factory = new JsonTypeInfoResolverFactory(serializerOptions); + var type = factory.GetType(); - foreach (var resolverFactory in ResolverFactories) + for (var i = 0; i < _pluginResolverFactories.Count; i++) { - if ((typeMapping = resolverFactory.GetMappingByClrType(clrType)) is not null) + if (_pluginResolverFactories[i].GetType() == type) { - _mappingsByClrType[clrType] = typeMapping; - return true; + _pluginResolverFactories[i] = factory; + break; } } - if (clrType.IsArray) - { - if (TryResolveMappingByClrType(clrType.GetElementType()!, out var elementMapping)) - { - _mappingsByClrType[clrType] = typeMapping = new( - NpgsqlDbType.Array | elementMapping.NpgsqlDbType, - elementMapping.DataTypeName + "[]"); - return true; - } + _typeMappingOptions = null; + } + return this; + } - typeMapping = null; - return false; - } + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public INpgsqlTypeMapper EnableDynamicJson( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + // Use a re-entered lock to add the read of _jsonSerializerOptions to the total scope. 
+ lock (_sync) + AddTypeInfoResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, _jsonSerializerOptions)); + return this; + } - var typeInfo = clrType.GetTypeInfo(); + /// + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public INpgsqlTypeMapper EnableRecordsAsTuples() + { + AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); + return this; + } - var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => - x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); - if (ilist != null) - { - if (TryResolveMappingByClrType(ilist.GetGenericArguments()[0], out var elementMapping)) - { - _mappingsByClrType[clrType] = typeMapping = new( - NpgsqlDbType.Array | elementMapping.NpgsqlDbType, - elementMapping.DataTypeName + "[]"); - return true; - } + /// + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public INpgsqlTypeMapper EnableUnmappedTypes() + { + AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); + return this; + } - typeMapping = null; - return false; - } + /// + public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where TEnum : struct, Enum + { + lock (_sync) + { + _userTypeMapper.MapEnum(pgName, nameTranslator); + _typeMappingOptions = null; + return this; + } + } - if (typeInfo.IsGenericType && clrType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - if (TryResolveMappingByClrType(clrType.GetGenericArguments()[0], out var elementMapping)) - { - _mappingsByClrType[clrType] = typeMapping = new( - NpgsqlDbType.Range | elementMapping.NpgsqlDbType, - dataTypeName: null); - return true; - } + /// + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum + { + lock (_sync) + { + var removed = _userTypeMapper.UnmapEnum(pgName, nameTranslator); + _typeMappingOptions = null; + return removed; + } + } - typeMapping = null; - return false; - } + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + lock (_sync) + { + _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + _typeMappingOptions = null; + return this; + } + } - typeMapping = null; - return false; + /// + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + { + lock (_sync) + { + var removed = _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); + _typeMappingOptions = null; + return removed; } } - #endregion NpgsqlDbType/DbType inference for NpgsqlParameter + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => MapComposite(typeof(T), pgName, nameTranslator); - #region Static translation tables + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => UnmapComposite(typeof(T), pgName, nameTranslator); - public static string? 
NpgsqlDbTypeToDataTypeName(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch - { - // Numeric types - NpgsqlDbType.Smallint => "smallint", - NpgsqlDbType.Integer => "integer", - NpgsqlDbType.Bigint => "bigint", - NpgsqlDbType.Real => "real", - NpgsqlDbType.Double => "double precision", - NpgsqlDbType.Numeric => "numeric", - NpgsqlDbType.Money => "money", - - // Text types - NpgsqlDbType.Text => "text", - NpgsqlDbType.Xml => "xml", - NpgsqlDbType.Varchar => "character varying", - NpgsqlDbType.Char => "character", - NpgsqlDbType.Name => "name", - NpgsqlDbType.Refcursor => "refcursor", - NpgsqlDbType.Citext => "citext", - NpgsqlDbType.Jsonb => "jsonb", - NpgsqlDbType.Json => "json", - NpgsqlDbType.JsonPath => "jsonpath", - - // Date/time types - NpgsqlDbType.Timestamp => "timestamp without time zone", - NpgsqlDbType.TimestampTz => "timestamp with time zone", - NpgsqlDbType.Date => "date", - NpgsqlDbType.Time => "time without time zone", - NpgsqlDbType.TimeTz => "time with time zone", - NpgsqlDbType.Interval => "interval", - - // Network types - NpgsqlDbType.Cidr => "cidr", - NpgsqlDbType.Inet => "inet", - NpgsqlDbType.MacAddr => "macaddr", - NpgsqlDbType.MacAddr8 => "macaddr8", - - // Full-text search types - NpgsqlDbType.TsQuery => "tsquery", - NpgsqlDbType.TsVector => "tsvector", - - // Geometry types - NpgsqlDbType.Box => "box", - NpgsqlDbType.Circle => "circle", - NpgsqlDbType.Line => "line", - NpgsqlDbType.LSeg => "lseg", - NpgsqlDbType.Path => "path", - NpgsqlDbType.Point => "point", - NpgsqlDbType.Polygon => "polygon", - - // LTree types - NpgsqlDbType.LQuery => "lquery", - NpgsqlDbType.LTree => "ltree", - NpgsqlDbType.LTxtQuery => "ltxtquery", - - // UInt types - NpgsqlDbType.Oid => "oid", - NpgsqlDbType.Xid => "xid", - NpgsqlDbType.Xid8 => "xid8", - NpgsqlDbType.Cid => "cid", - NpgsqlDbType.Regtype => "regtype", - NpgsqlDbType.Regconfig => "regconfig", - - // Misc types - NpgsqlDbType.Boolean => "boolean", - NpgsqlDbType.Bytea => "bytea", - 
NpgsqlDbType.Uuid => "uuid", - NpgsqlDbType.Varbit => "bit varying", - NpgsqlDbType.Bit => "bit", - NpgsqlDbType.Hstore => "hstore", - - NpgsqlDbType.Geometry => "geometry", - NpgsqlDbType.Geography => "geography", - - // Built-in range types - NpgsqlDbType.IntegerRange => "int4range", - NpgsqlDbType.BigIntRange => "int8range", - NpgsqlDbType.NumericRange => "numrange", - NpgsqlDbType.TimestampRange => "tsrange", - NpgsqlDbType.TimestampTzRange => "tstzrange", - NpgsqlDbType.DateRange => "daterange", - - // Built-in multirange types - NpgsqlDbType.IntegerMultirange => "int4multirange", - NpgsqlDbType.BigIntMultirange => "int8multirange", - NpgsqlDbType.NumericMultirange => "nummultirange", - NpgsqlDbType.TimestampMultirange => "tsmultirange", - NpgsqlDbType.TimestampTzMultirange => "tstzmultirange", - NpgsqlDbType.DateMultirange => "datemultirange", - - // Internal types - NpgsqlDbType.Int2Vector => "int2vector", - NpgsqlDbType.Oidvector => "oidvector", - NpgsqlDbType.PgLsn => "pg_lsn", - NpgsqlDbType.Tid => "tid", - NpgsqlDbType.InternalChar => "char", - - // Special types - NpgsqlDbType.Unknown => "unknown", - - _ => npgsqlDbType.HasFlag(NpgsqlDbType.Array) - ? NpgsqlDbTypeToDataTypeName(npgsqlDbType & ~NpgsqlDbType.Array) + "[]" - : null // e.g. ranges - }; - - public static NpgsqlDbType DataTypeNameToNpgsqlDbType(string typeName) + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { - // Strip any facet information (length/precision/scale) - var parenIndex = typeName.IndexOf('('); - if (parenIndex > -1) - typeName = typeName.Substring(0, parenIndex); - - return typeName switch + lock (_sync) { - // Numeric types - "smallint" => NpgsqlDbType.Smallint, - "integer" or "int" => NpgsqlDbType.Integer, - "bigint" => NpgsqlDbType.Bigint, - "real" => NpgsqlDbType.Real, - "double precision" => NpgsqlDbType.Double, - "numeric" => NpgsqlDbType.Numeric, - "money" => NpgsqlDbType.Money, - - // Text types - "text" => NpgsqlDbType.Text, - "xml" => NpgsqlDbType.Xml, - "character varying" or "varchar" => NpgsqlDbType.Varchar, - "character" => NpgsqlDbType.Char, - "name" => NpgsqlDbType.Name, - "refcursor" => NpgsqlDbType.Refcursor, - "citext" => NpgsqlDbType.Citext, - "jsonb" => NpgsqlDbType.Jsonb, - "json" => NpgsqlDbType.Json, - "jsonpath" => NpgsqlDbType.JsonPath, - - // Date/time types - "timestamp without time zone" or "timestamp" => NpgsqlDbType.Timestamp, - "timestamp with time zone" or "timestamptz" => NpgsqlDbType.TimestampTz, - "date" => NpgsqlDbType.Date, - "time without time zone" or "timetz" => NpgsqlDbType.Time, - "time with time zone" or "time" => NpgsqlDbType.TimeTz, - "interval" => NpgsqlDbType.Interval, - - // Network types - "cidr" => NpgsqlDbType.Cidr, - "inet" => NpgsqlDbType.Inet, - "macaddr" => NpgsqlDbType.MacAddr, - "macaddr8" => NpgsqlDbType.MacAddr8, - - // Full-text search types - "tsquery" => NpgsqlDbType.TsQuery, - "tsvector" => NpgsqlDbType.TsVector, - - // Geometry types - "box" => NpgsqlDbType.Box, - "circle" => NpgsqlDbType.Circle, - "line" => NpgsqlDbType.Line, - "lseg" => NpgsqlDbType.LSeg, - "path" => NpgsqlDbType.Path, - "point" => NpgsqlDbType.Point, - "polygon" => NpgsqlDbType.Polygon, - - // LTree types - "lquery" => NpgsqlDbType.LQuery, - "ltree" => NpgsqlDbType.LTree, - "ltxtquery" => NpgsqlDbType.LTxtQuery, - - // UInt types - "oid" => NpgsqlDbType.Oid, - "xid" => NpgsqlDbType.Xid, - "xid8" => 
NpgsqlDbType.Xid8, - "cid" => NpgsqlDbType.Cid, - "regtype" => NpgsqlDbType.Regtype, - "regconfig" => NpgsqlDbType.Regconfig, - - // Misc types - "boolean" or "bool" => NpgsqlDbType.Boolean, - "bytea" => NpgsqlDbType.Bytea, - "uuid" => NpgsqlDbType.Uuid, - "bit varying" or "varbit" => NpgsqlDbType.Varbit, - "bit" => NpgsqlDbType.Bit, - "hstore" => NpgsqlDbType.Hstore, - - "geometry" => NpgsqlDbType.Geometry, - "geography" => NpgsqlDbType.Geography, - - // Built-in range types - "int4range" => NpgsqlDbType.IntegerRange, - "int8range" => NpgsqlDbType.BigIntRange, - "numrange" => NpgsqlDbType.NumericRange, - "tsrange" => NpgsqlDbType.TimestampRange, - "tstzrange" => NpgsqlDbType.TimestampTzRange, - "daterange" => NpgsqlDbType.DateRange, - - // Built-in multirange types - "int4multirange" => NpgsqlDbType.IntegerMultirange, - "int8multirange" => NpgsqlDbType.BigIntMultirange, - "nummultirange" => NpgsqlDbType.NumericMultirange, - "tsmultirange" => NpgsqlDbType.TimestampMultirange, - "tstzmultirange" => NpgsqlDbType.TimestampTzMultirange, - "datemultirange" => NpgsqlDbType.DateMultirange, - - // Internal types - "int2vector" => NpgsqlDbType.Int2Vector, - "oidvector" => NpgsqlDbType.Oidvector, - "pg_lsn" => NpgsqlDbType.PgLsn, - "tid" => NpgsqlDbType.Tid, - "char" => NpgsqlDbType.InternalChar, - - _ => typeName.EndsWith("[]", StringComparison.Ordinal) && - DataTypeNameToNpgsqlDbType(typeName.Substring(0, typeName.Length - 2)) is { } elementNpgsqlDbType && - elementNpgsqlDbType != NpgsqlDbType.Unknown - ? elementNpgsqlDbType | NpgsqlDbType.Array - : NpgsqlDbType.Unknown // e.g. ranges - }; + _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); + _typeMappingOptions = null; + return this; + } } - internal static NpgsqlDbType? 
DbTypeToNpgsqlDbType(DbType dbType) - => dbType switch - { - DbType.AnsiString => NpgsqlDbType.Text, - DbType.Binary => NpgsqlDbType.Bytea, - DbType.Byte => NpgsqlDbType.Smallint, - DbType.Boolean => NpgsqlDbType.Boolean, - DbType.Currency => NpgsqlDbType.Money, - DbType.Date => NpgsqlDbType.Date, - DbType.DateTime => LegacyTimestampBehavior ? NpgsqlDbType.Timestamp : NpgsqlDbType.TimestampTz, - DbType.Decimal => NpgsqlDbType.Numeric, - DbType.VarNumeric => NpgsqlDbType.Numeric, - DbType.Double => NpgsqlDbType.Double, - DbType.Guid => NpgsqlDbType.Uuid, - DbType.Int16 => NpgsqlDbType.Smallint, - DbType.Int32 => NpgsqlDbType.Integer, - DbType.Int64 => NpgsqlDbType.Bigint, - DbType.Single => NpgsqlDbType.Real, - DbType.String => NpgsqlDbType.Text, - DbType.Time => NpgsqlDbType.Time, - DbType.AnsiStringFixedLength => NpgsqlDbType.Text, - DbType.StringFixedLength => NpgsqlDbType.Text, - DbType.Xml => NpgsqlDbType.Xml, - DbType.DateTime2 => NpgsqlDbType.Timestamp, - DbType.DateTimeOffset => NpgsqlDbType.TimestampTz, - - DbType.Object => null, - DbType.SByte => null, - DbType.UInt16 => null, - DbType.UInt32 => null, - DbType.UInt64 => null, - - _ => throw new ArgumentOutOfRangeException(nameof(dbType), dbType, null) - }; - - internal static DbType NpgsqlDbTypeToDbType(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + { + lock (_sync) { - // Numeric types - NpgsqlDbType.Smallint => DbType.Int16, - NpgsqlDbType.Integer => DbType.Int32, - NpgsqlDbType.Bigint => DbType.Int64, - NpgsqlDbType.Real => DbType.Single, - NpgsqlDbType.Double => DbType.Double, - NpgsqlDbType.Numeric => DbType.Decimal, - NpgsqlDbType.Money => DbType.Currency, - - // Text types - NpgsqlDbType.Text => DbType.String, - NpgsqlDbType.Xml => DbType.Xml, - NpgsqlDbType.Varchar => DbType.String, - NpgsqlDbType.Char => DbType.String, - NpgsqlDbType.Name => DbType.String, - NpgsqlDbType.Refcursor => DbType.String, - NpgsqlDbType.Citext => DbType.String, - NpgsqlDbType.Jsonb => DbType.Object, - NpgsqlDbType.Json => DbType.Object, - NpgsqlDbType.JsonPath => DbType.String, - - // Date/time types - NpgsqlDbType.Timestamp => LegacyTimestampBehavior ? DbType.DateTime : DbType.DateTime2, - NpgsqlDbType.TimestampTz => LegacyTimestampBehavior ? DbType.DateTimeOffset : DbType.DateTime, - NpgsqlDbType.Date => DbType.Date, - NpgsqlDbType.Time => DbType.Time, - - // Misc data types - NpgsqlDbType.Bytea => DbType.Binary, - NpgsqlDbType.Boolean => DbType.Boolean, - NpgsqlDbType.Uuid => DbType.Guid, - - NpgsqlDbType.Unknown => DbType.Object, - - _ => DbType.Object - }; - - #endregion Static translation tables -} \ No newline at end of file + var result = _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + _typeMappingOptions = null; + return result; + } + } +} diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 3d0c46dd92..53088b33fd 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -1,6 +1,8 @@ -using System; +using System; using System.Diagnostics.CodeAnalysis; -using Npgsql.Internal.TypeHandling; +using System.Text.Json; +using System.Text.Json.Nodes; +using Npgsql.Internal; using Npgsql.NameTranslation; using NpgsqlTypes; @@ -41,7 +43,7 @@ public interface INpgsqlTypeMapper 
/// Defaults to . /// /// The .NET enum type to be mapped - INpgsqlTypeMapper MapEnum( + INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum; @@ -57,11 +59,54 @@ INpgsqlTypeMapper MapEnum( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - bool UnmapEnum( + bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum; + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper MapEnum( + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type clrType, + string? pgName = null, + INpgsqlNameTranslator? nameTranslator = null); + + /// + /// Removes an existing enum mapping. 
+ /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + bool UnmapEnum( + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type clrType, + string? pgName = null, + INpgsqlNameTranslator? nameTranslator = null); + /// /// Maps a CLR type to a PostgreSQL composite type. /// @@ -82,8 +127,8 @@ bool UnmapEnum( /// Defaults to . /// /// The .NET type to be mapped - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - INpgsqlTypeMapper MapComposite( + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -98,8 +143,8 @@ INpgsqlTypeMapper MapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - bool UnmapComposite( + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -122,9 +167,9 @@ bool UnmapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] INpgsqlTypeMapper MapComposite( - Type clrType, + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -140,21 +185,75 @@ INpgsqlTypeMapper MapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] bool UnmapComposite( - Type clrType, + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null); /// - /// Adds a type resolver factory, which produces resolvers that can add or modify support for PostgreSQL types. + /// Adds a type info resolver factory which can add or modify support for PostgreSQL types. /// Typically used by plugins. /// - /// The type resolver factory to be added. - void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory); + /// The type resolver factory to be added. + [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] + void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory); + + /// + /// Adds a DbType resolver factory which can change how DbType cases are mapped to PostgreSQL data types. + /// Typically used by plugins. + /// + /// The resolver factory to be added. + [Experimental(NpgsqlDiagnostics.DbTypeResolverExperimental)] + public void AddDbTypeResolverFactory(DbTypeResolverFactory factory); + + /// + /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. + /// + /// Options to customize JSON serialization and deserialization. + /// + INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOptions); + + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode( + "Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + INpgsqlTypeMapper EnableDynamicJson(Type[]? jsonbClrTypes = null, Type[]? 
jsonClrTypes = null); + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode( + "The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper EnableRecordsAsTuples(); + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode( + "The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper EnableUnmappedTypes(); /// /// Resets all mapping changes performed on this type mapper and reverts it to its original, starting state. /// void Reset(); -} \ No newline at end of file +} diff --git a/src/Npgsql/TypeMapping/PostgresTypeOIDs.cs b/src/Npgsql/TypeMapping/PostgresTypeOIDs.cs deleted file mode 100644 index e3f0d72c4d..0000000000 --- a/src/Npgsql/TypeMapping/PostgresTypeOIDs.cs +++ /dev/null @@ -1,112 +0,0 @@ -#pragma warning disable RS0016 -#pragma warning disable 1591 - -namespace Npgsql.TypeMapping; - -/// -/// Holds well-known, built-in PostgreSQL type OIDs. 
-/// -/// -/// Source: -/// -static class PostgresTypeOIDs -{ - // Numeric - public const uint Int8 = 20; - public const uint Float8 = 701; - public const uint Int4 = 23; - public const uint Numeric = 1700; - public const uint Float4 = 700; - public const uint Int2 = 21; - public const uint Money = 790; - - // Boolean - public const uint Bool = 16; - - // Geometric - public const uint Box = 603; - public const uint Circle = 718; - public const uint Line = 628; - public const uint LSeg = 601; - public const uint Path = 602; - public const uint Point = 600; - public const uint Polygon = 604; - - // Character - public const uint BPChar = 1042; - public const uint Text = 25; - public const uint Varchar = 1043; - public const uint Name = 19; - public const uint Char = 18; - - // Binary data - public const uint Bytea = 17; - - // Date/Time - public const uint Date = 1082; - public const uint Time = 1083; - public const uint Timestamp = 1114; - public const uint TimestampTz = 1184; - public const uint Interval = 1186; - public const uint TimeTz = 1266; - public const uint Abstime = 702; - - // Network address - public const uint Inet = 869; - public const uint Cidr = 650; - public const uint Macaddr = 829; - public const uint Macaddr8 = 774; - - // Bit string - public const uint Bit = 1560; - public const uint Varbit = 1562; - - // Text search - public const uint TsVector = 3614; - public const uint TsQuery = 3615; - public const uint Regconfig = 3734; - - // UUID - public const uint Uuid = 2950; - - // XML - public const uint Xml = 142; - - // JSON - public const uint Json = 114; - public const uint Jsonb = 3802; - public const uint JsonPath = 4072; - - // public - public const uint Refcursor = 1790; - public const uint Oidvector = 30; - public const uint Int2vector = 22; - public const uint Oid = 26; - public const uint Xid = 28; - public const uint Xid8 = 5069; - public const uint Cid = 29; - public const uint Regtype = 2206; - public const uint Tid = 27; - public 
const uint PgLsn = 3220; - - // Special - public const uint Record = 2249; - public const uint Void = 2278; - public const uint Unknown = 705; - - // Range types - public const uint Int4Range = 3904; - public const uint Int8Range = 3926; - public const uint NumRange = 3906; - public const uint TsRange = 3908; - public const uint TsTzRange = 3910; - public const uint DateRange = 3912; - - // Multirange types - public const uint Int4Multirange = 4451; - public const uint Int8Multirange = 4536; - public const uint NumMultirange = 4532; - public const uint TsMultirange = 4533; - public const uint TsTzMultirange = 4534; - public const uint DateMultirange = 4535; -} \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/TypeMapper.cs b/src/Npgsql/TypeMapping/TypeMapper.cs deleted file mode 100644 index 376725c90a..0000000000 --- a/src/Npgsql/TypeMapping/TypeMapper.cs +++ /dev/null @@ -1,529 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; -using System.Runtime.CompilerServices; -using Microsoft.Extensions.Logging; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using Npgsql.Util; -using NpgsqlTypes; - -namespace Npgsql.TypeMapping; - -sealed class TypeMapper -{ - internal NpgsqlConnector Connector { get; } - readonly object _writeLock = new(); - - NpgsqlDatabaseInfo? _databaseInfo; - - internal NpgsqlDatabaseInfo DatabaseInfo - => _databaseInfo ?? 
throw new InvalidOperationException("Internal error: this type mapper hasn't yet been bound to a database info object"); - - volatile TypeHandlerResolver[] _resolvers; - internal NpgsqlTypeHandler UnrecognizedTypeHandler { get; } - - readonly ConcurrentDictionary _handlersByOID = new(); - readonly ConcurrentDictionary _handlersByNpgsqlDbType = new(); - readonly ConcurrentDictionary _handlersByClrType = new(); - readonly ConcurrentDictionary _handlersByDataTypeName = new(); - - readonly Dictionary _userTypeMappings = new(); - readonly INpgsqlNameTranslator _defaultNameTranslator; - - readonly ILogger _commandLogger; - - #region Construction - - internal TypeMapper(NpgsqlConnector connector, INpgsqlNameTranslator defaultNameTranslator) - { - Connector = connector; - _defaultNameTranslator = defaultNameTranslator; - UnrecognizedTypeHandler = new UnknownTypeHandler(Connector.TextEncoding); - _resolvers = Array.Empty(); - _commandLogger = connector.LoggingConfiguration.CommandLogger; - } - - #endregion Constructors - - internal void Initialize( - NpgsqlDatabaseInfo databaseInfo, - List resolverFactories, - Dictionary userTypeMappings) - { - _databaseInfo = databaseInfo; - - var resolvers = new TypeHandlerResolver[resolverFactories.Count]; - for (var i = 0; i < resolverFactories.Count; i++) - resolvers[i] = resolverFactories[i].Create(Connector); - _resolvers = resolvers; - - foreach (var userTypeMapping in userTypeMappings.Values) - { - if (DatabaseInfo.TryGetPostgresTypeByName(userTypeMapping.PgTypeName, out var pgType)) - { - _handlersByOID[pgType.OID] = - _handlersByDataTypeName[pgType.FullName] = - _handlersByDataTypeName[pgType.Name] = - _handlersByClrType[userTypeMapping.ClrType] = userTypeMapping.CreateHandler(pgType, Connector); - - _userTypeMappings[pgType.OID] = new(npgsqlDbType: null, pgType.Name, userTypeMapping.ClrType); - } - } - } - - #region Type handler lookup - - /// - /// Looks up a type handler by its PostgreSQL type's OID. 
- /// - /// A PostgreSQL type OID - /// A type handler that can be used to encode and decode values. - internal NpgsqlTypeHandler ResolveByOID(uint oid) - => TryResolveByOID(oid, out var result) ? result : UnrecognizedTypeHandler; - - internal bool TryResolveByOID(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandler? handler) - { - if (_handlersByOID.TryGetValue(oid, out handler)) - return true; - - if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) - return false; - - lock (_writeLock) - { - if ((handler = ResolveByDataTypeNameCore(pgType.FullName)) is not null) - { - _handlersByOID[oid] = handler; - return true; - } - - if ((handler = ResolveByDataTypeNameCore(pgType.Name)) is not null) - { - _handlersByOID[oid] = handler; - return true; - } - - if ((handler = ResolveComplexTypeByDataTypeName(pgType.FullName, throwOnError: false)) is not null) - { - _handlersByOID[oid] = handler; - return true; - } - - handler = null; - return false; - } - } - - internal NpgsqlTypeHandler ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) - { - if (_handlersByNpgsqlDbType.TryGetValue(npgsqlDbType, out var handler)) - return handler; - - lock (_writeLock) - { - // First, try to resolve as a base type; translate the NpgsqlDbType to a PG data type name and look that up. 
- if (GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(npgsqlDbType) is { } dataTypeName) - { - foreach (var resolver in _resolvers) - { - try - { - if ((handler = resolver.ResolveByDataTypeName(dataTypeName)) is not null) - return _handlersByNpgsqlDbType[npgsqlDbType] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, - $"Type resolver {resolver.GetType().Name} threw exception while resolving NpgsqlDbType {npgsqlDbType}"); - } - } - } - - if (npgsqlDbType.HasFlag(NpgsqlDbType.Array)) - { - var elementHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Array); - - if (elementHandler.PostgresType.Array is not { } pgArrayType) - throw new ArgumentException( - $"No array type could be found in the database for element {elementHandler.PostgresType}"); - - return _handlersByNpgsqlDbType[npgsqlDbType] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } - - if (npgsqlDbType.HasFlag(NpgsqlDbType.Range)) - { - var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Range); - - if (subtypeHandler.PostgresType.Range is not { } pgRangeType) - throw new ArgumentException( - $"No range type could be found in the database for subtype {subtypeHandler.PostgresType}"); - - return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateRangeHandler(pgRangeType); - } - - if (npgsqlDbType.HasFlag(NpgsqlDbType.Multirange)) - { - var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Multirange); - - if (subtypeHandler.PostgresType.Range?.Multirange is not { } pgMultirangeType) - throw new ArgumentException(string.Format(NpgsqlStrings.NoMultirangeTypeFound, subtypeHandler.PostgresType)); - - return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType); - } - - throw new NpgsqlException($"The NpgsqlDbType '{npgsqlDbType}' isn't present in your database. 
" + - "You may need to install an extension or upgrade to a newer version."); - } - } - - internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) - => ResolveByDataTypeNameCore(typeName) ?? ResolveComplexTypeByDataTypeName(typeName, throwOnError: true)!; - - NpgsqlTypeHandler? ResolveByDataTypeNameCore(string typeName) - { - if (_handlersByDataTypeName.TryGetValue(typeName, out var handler)) - return handler; - - lock (_writeLock) - { - foreach (var resolver in _resolvers) - { - try - { - if ((handler = resolver.ResolveByDataTypeName(typeName)) is not null) - return _handlersByDataTypeName[typeName] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving data type name {typeName}"); - } - } - - return null; - } - } - - NpgsqlTypeHandler? ResolveComplexTypeByDataTypeName(string typeName, bool throwOnError) - { - lock (_writeLock) - { - if (DatabaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) - throw new NotSupportedException("Could not find PostgreSQL type " + typeName); - - switch (pgType) - { - case PostgresArrayType pgArrayType: - { - var elementHandler = ResolveByOID(pgArrayType.Element.OID); - return _handlersByDataTypeName[typeName] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } - - case PostgresRangeType pgRangeType: - { - var subtypeHandler = ResolveByOID(pgRangeType.Subtype.OID); - return _handlersByDataTypeName[typeName] = subtypeHandler.CreateRangeHandler(pgRangeType); - } - - case PostgresMultirangeType pgMultirangeType: - { - var subtypeHandler = ResolveByOID(pgMultirangeType.Subrange.Subtype.OID); - return _handlersByDataTypeName[typeName] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType); - } - - case PostgresEnumType pgEnumType: - { - // A mapped enum would have been registered in _extraHandlersByDataTypeName and bound above - this is unmapped. 
- return _handlersByDataTypeName[typeName] = - new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding); - } - - case PostgresDomainType pgDomainType: - return _handlersByDataTypeName[typeName] = ResolveByOID(pgDomainType.BaseType.OID); - - case PostgresBaseType pgBaseType: - return throwOnError - ? throw new NotSupportedException($"PostgreSQL type '{pgBaseType}' isn't supported by Npgsql") - : null; - - case PostgresCompositeType pgCompositeType: - // We don't support writing unmapped composite types, but we do support reading unmapped composite types. - // So when we're invoked from ResolveOID (which is the read path), we don't want to raise an exception. - return throwOnError - ? throw new NotSupportedException( - $"Composite type '{pgCompositeType}' must be mapped with Npgsql before being used, see the docs.") - : null; - - default: - throw new ArgumentOutOfRangeException($"Unhandled PostgreSQL type type: {pgType.GetType()}"); - } - } - } - - internal NpgsqlTypeHandler ResolveByValue(T value) - { - if (value is null) - return ResolveByClrType(typeof(T)); - - if (typeof(T).IsValueType) - { - // Attempt to resolve value types generically via the resolver. This is the efficient fast-path, where we don't even need to - // do a dictionary lookup (the JIT elides type checks in generic methods for value types) - NpgsqlTypeHandler? handler; - - foreach (var resolver in _resolvers) - { - try - { - if ((handler = resolver.ResolveValueTypeGenerically(value)) is not null) - return handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {typeof(T)}"); - } - } - - // There may still be some value types not resolved by the above, e.g. NpgsqlRange - } - - // Value types would have been resolved above, so this is a reference type - no JIT optimizations. - // We go through the regular logic (and there's no boxing). 
- return ResolveByValue((object)value); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal NpgsqlTypeHandler ResolveByValue(object value) - { - // We resolve as follows: - // 1. Cached by-type lookup (fast path). This will work for almost all types after the very first resolution. - // 2. Value-dependent type lookup (e.g. DateTime by Kind) via the resolvers. This includes complex types (e.g. array/range - // over DateTime), and the results cannot be cached. - // 3. Uncached by-type lookup (for the very first resolution of a given type) - - var type = value.GetType(); - if (_handlersByClrType.TryGetValue(type, out var handler)) - return handler; - - foreach (var resolver in _resolvers) - { - try - { - if ((handler = resolver.ResolveValueDependentValue(value)) is not null) - return handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); - } - } - - // ResolveByClrType either throws, or resolves a handler and caches it in _handlersByClrType (where it would be found above the - // next time we resolve this type) - return ResolveByClrType(type); - } - - // TODO: This is needed as a separate method only because of binary COPY, see #3957 - internal NpgsqlTypeHandler ResolveByClrType(Type type) - { - if (_handlersByClrType.TryGetValue(type, out var handler)) - return handler; - - lock (_writeLock) - { - foreach (var resolver in _resolvers) - { - try - { - if ((handler = resolver.ResolveByClrType(type)) is not null) - return _handlersByClrType[type] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); - } - } - - // Try to see if it is an array type - var arrayElementType = GetArrayListElementType(type); - if (arrayElementType is not null) - { - // With PG14, we map arrays over range types to PG multiranges by default, not to 
regular arrays over ranges. - if (arrayElementType.IsGenericType && - arrayElementType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>) && - DatabaseInfo.Version.IsGreaterOrEqual(14)) - { - var subtypeType = arrayElementType.GetGenericArguments()[0]; - - return ResolveByClrType(subtypeType) is - { PostgresType : { Range : { Multirange: { } pgMultirangeType } } } subtypeHandler - ? _handlersByClrType[type] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } - - if (ResolveByClrType(arrayElementType) is not { } elementHandler) - throw new ArgumentException($"Array type over CLR type {arrayElementType.Name} isn't supported by Npgsql"); - - if (elementHandler.PostgresType.Array is not { } pgArrayType) - throw new ArgumentException( - $"No array type could be found in the database for element {elementHandler.PostgresType}"); - - return _handlersByClrType[type] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } - - if (Nullable.GetUnderlyingType(type) is { } underlyingType && ResolveByClrType(underlyingType) is { } underlyingHandler) - return _handlersByClrType[type] = underlyingHandler; - - if (type.IsEnum) - { - return DatabaseInfo.GetPostgresTypeByName(GetPgName(type, _defaultNameTranslator)) is PostgresEnumType pgEnumType - ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) - : throw new NotSupportedException( - $"Could not find a PostgreSQL enum type corresponding to {type.Name}. " + - "Consider mapping the enum before usage, refer to the documentation for more details."); - } - - // TODO: We can make the following compatible with reflection-free mode by having NpgsqlRange implement some interface, and - // check for that. 
- if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - var subtypeType = type.GetGenericArguments()[0]; - - return ResolveByClrType(subtypeType) is { PostgresType : { Range : { } pgRangeType } } subtypeHandler - ? _handlersByClrType[type] = subtypeHandler.CreateRangeHandler(pgRangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } - - if (typeof(IEnumerable).IsAssignableFrom(type)) - throw new NotSupportedException("IEnumerable parameters are not supported, pass an array or List instead"); - - throw new NotSupportedException($"The CLR type {type} isn't natively supported by Npgsql or your PostgreSQL. " + - $"To use it with a PostgreSQL composite you need to specify {nameof(NpgsqlParameter.DataTypeName)} or to map it, please refer to the documentation."); - } - - static Type? GetArrayListElementType(Type type) - { - var typeInfo = type.GetTypeInfo(); - if (typeInfo.IsArray) - return GetUnderlyingType(type.GetElementType()!); // The use of bang operator is justified here as Type.GetElementType() only returns null for the Array base class which can't be mapped in a useful way. - - var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); - if (ilist != null) - return GetUnderlyingType(ilist.GetGenericArguments()[0]); - - if (typeof(IList).IsAssignableFrom(type)) - throw new NotSupportedException("Non-generic IList is a supported parameter, but the NpgsqlDbType parameter must be set on the parameter"); - - return null; - - Type GetUnderlyingType(Type t) - => Nullable.GetUnderlyingType(t) ?? t; - } - } - - internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMappingInfo? 
mapping) - { - foreach (var resolver in _resolvers) - if ((mapping = resolver.GetMappingByDataTypeName(pgType.FullName)) is not null) - return true; - - foreach (var resolver in _resolvers) - if ((mapping = resolver.GetMappingByDataTypeName(pgType.Name)) is not null) - return true; - - switch (pgType) - { - case PostgresArrayType pgArrayType: - if (TryGetMapping(pgArrayType.Element, out var elementMapping)) - { - mapping = new(elementMapping.NpgsqlDbType | NpgsqlDbType.Array, pgType.DisplayName); - return true; - } - - break; - - case PostgresRangeType pgRangeType: - { - if (TryGetMapping(pgRangeType.Subtype, out var subtypeMapping)) - { - mapping = new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Range, pgType.DisplayName); - return true; - } - - break; - } - - case PostgresMultirangeType pgMultirangeType: - { - if (TryGetMapping(pgMultirangeType.Subrange.Subtype, out var subtypeMapping)) - { - mapping = new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Multirange, pgType.DisplayName); - return true; - } - - break; - } - - case PostgresDomainType pgDomainType: - if (TryGetMapping(pgDomainType.BaseType, out var baseMapping)) - { - mapping = new(baseMapping.NpgsqlDbType, pgType.DisplayName, baseMapping.ClrTypes); - return true; - } - - break; - - case PostgresEnumType or PostgresCompositeType: - return _userTypeMappings.TryGetValue(pgType.OID, out mapping); - } - - mapping = null; - return false; - } - - #endregion Type handler lookup - - internal (NpgsqlDbType? 
npgsqlDbType, PostgresType postgresType) GetTypeInfoByOid(uint oid) - { - if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) - throw new InvalidOperationException($"Couldn't find PostgreSQL type with OID {oid}"); - - foreach (var resolver in _resolvers) - if (resolver.GetMappingByDataTypeName(pgType.FullName) is { } mapping) - return (mapping.NpgsqlDbType, pgType); - - foreach (var resolver in _resolvers) - if (resolver.GetMappingByDataTypeName(pgType.Name) is { } mapping) - return (mapping.NpgsqlDbType, pgType); - - switch (pgType) - { - case PostgresArrayType pgArrayType: - var (elementNpgsqlDbType, _) = GetTypeInfoByOid(pgArrayType.Element.OID); - if (elementNpgsqlDbType.HasValue) - return new(elementNpgsqlDbType | NpgsqlDbType.Array, pgType); - break; - - case PostgresDomainType pgDomainType: - var (baseNpgsqlDbType, _) = GetTypeInfoByOid(pgDomainType.BaseType.OID); - return new(baseNpgsqlDbType, pgType); - } - - return (null, pgType); - } - - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); -} diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs new file mode 100644 index 0000000000..3b7928bbd2 --- /dev/null +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -0,0 +1,275 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal; +using Npgsql.Internal.Composites; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.NameTranslation; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.TypeMapping; + +/// +/// The base class for user type mappings. +/// +public abstract class UserTypeMapping +{ + /// + /// The name of the PostgreSQL type that this mapping is for. + /// + public string PgTypeName { get; } + /// + /// The CLR type that this mapping is for. 
+ /// + public Type ClrType { get; } + + internal UserTypeMapping(string pgTypeName, Type type) + => (PgTypeName, ClrType) = (pgTypeName, type); + + internal abstract void AddMapping(TypeInfoMappingCollection mappings); + internal abstract void AddArrayMapping(TypeInfoMappingCollection mappings); +} + +sealed class UserTypeMapper : PgTypeInfoResolverFactory +{ + readonly List _mappings; + public IList Items => _mappings; + + INpgsqlNameTranslator _defaultNameTranslator = NpgsqlSnakeCaseNameTranslator.Instance; + public INpgsqlNameTranslator DefaultNameTranslator + { + get => _defaultNameTranslator; + set + { + ArgumentNullException.ThrowIfNull(value); + _defaultNameTranslator = value; + } + } + + UserTypeMapper(IEnumerable mappings) => _mappings = [..mappings]; + public UserTypeMapper() => _mappings = []; + + public UserTypeMapper Clone() => new(_mappings) { DefaultNameTranslator = DefaultNameTranslator }; + + public UserTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + { + Unmap(typeof(TEnum), out var resolvedName, pgName, nameTranslator); + Items.Add(new EnumMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); + return this; + } + + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + => Unmap(typeof(TEnum), out _, pgName, nameTranslator ?? DefaultNameTranslator); + + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "MapEnum TEnum has less DAM annotations than clrType.")] + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. 
This may not work when AOT compiling.")] + public UserTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (!clrType.IsEnum || !clrType.IsValueType) + throw new ArgumentException("Type must be a concrete Enum", nameof(clrType)); + + var openMethod = typeof(UserTypeMapper).GetMethod(nameof(MapEnum), [typeof(string), typeof(INpgsqlNameTranslator)])!; + var method = openMethod.MakeGenericMethod(clrType); + method.Invoke(this, [pgName, nameTranslator]); + return this; + } + + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)]Type clrType,string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (!clrType.IsEnum || !clrType.IsValueType) + throw new ArgumentException("Type must be a concrete Enum", nameof(clrType)); + + return Unmap(clrType, out _, pgName, nameTranslator ?? DefaultNameTranslator); + } + + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public UserTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : class + { + Unmap(typeof(T), out var resolvedName, pgName, nameTranslator); + Items.Add(new CompositeMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); + return this; + } + + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public UserTypeMapper MapStructComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct + { + Unmap(typeof(T), out var resolvedName, pgName, nameTranslator); + Items.Add(new StructCompositeMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); + return this; + } + + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "MapStructComposite and MapComposite have identical DAM annotations to clrType.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public UserTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (clrType.IsConstructedGenericType && clrType.GetGenericTypeDefinition() == typeof(Nullable<>)) + throw new ArgumentException("Cannot map nullable.", nameof(clrType)); + + var openMethod = typeof(UserTypeMapper).GetMethod( + clrType.IsValueType ? nameof(MapStructComposite) : nameof(MapComposite), + [typeof(string), typeof(INpgsqlNameTranslator)])!; + + var method = openMethod.MakeGenericMethod(clrType); + + method.Invoke(this, [pgName, nameTranslator]); + + return this; + } + + public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : class + => UnmapComposite(typeof(T), pgName, nameTranslator); + + public bool UnmapStructComposite(string? 
pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct + => UnmapComposite(typeof(T), pgName, nameTranslator); + + public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => Unmap(clrType, out _, pgName, nameTranslator); + + bool Unmap(Type type, out string resolvedName, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (pgName != null && pgName.Trim() == "") + throw new ArgumentException("pgName can't be empty", nameof(pgName)); + + nameTranslator ??= DefaultNameTranslator; + resolvedName = pgName ??= GetPgName(type, nameTranslator); + + UserTypeMapping? toRemove = null; + foreach (var item in _mappings) + if (item.PgTypeName == pgName) + toRemove = item; + + return toRemove is not null && _mappings.Remove(toRemove); + } + + static string GetPgName(Type type, INpgsqlNameTranslator nameTranslator) + => type.GetCustomAttribute()?.PgName + ?? nameTranslator.TranslateTypeName(type.Name); + + public override IPgTypeInfoResolver CreateResolver() => new Resolver([.._mappings]); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver([.._mappings]); + + class Resolver(List userTypeMappings) : IPgTypeInfoResolver + { + protected readonly List _userTypeMappings = userTypeMappings; + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var userTypeMapping in _userTypeMappings) + userTypeMapping.AddMapping(mappings); + + return mappings; + } + } + + sealed class ArrayResolver(List userTypeMappings) : Resolver(userTypeMappings), IPgTypeInfoResolver + { + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var userTypeMapping in _userTypeMappings) + userTypeMapping.AddArrayMapping(mappings); + + return mappings; + } + } + + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + sealed class CompositeMapping< + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | + DynamicallyAccessedMemberTypes.PublicProperties)] + T>(string pgTypeName, INpgsqlNameTranslator nameTranslator) : UserTypeMapping(pgTypeName, typeof(T)) + where T : class + { + internal override void AddMapping(TypeInfoMappingCollection mappings) + => mappings.AddType(PgTypeName, (options, mapping, _) => + { + var pgType = mapping.GetPgType(options); + if (pgType is not PostgresCompositeType compositeType) + throw new InvalidOperationException("Composite mapping must be to a composite type"); + + return mapping.CreateInfo(options, new CompositeConverter( + ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, nameTranslator, options))); + }, isDefault: true); + + internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddArrayType(PgTypeName); + } + + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + sealed class StructCompositeMapping< + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | + DynamicallyAccessedMemberTypes.PublicProperties)] + T>(string pgTypeName, INpgsqlNameTranslator nameTranslator) : UserTypeMapping(pgTypeName, typeof(T)) + where T : struct + { + internal override void AddMapping(TypeInfoMappingCollection mappings) + => mappings.AddStructType(PgTypeName, (options, mapping, requiresDataTypeName) => + { + var pgType = mapping.GetPgType(options); + if (pgType is not PostgresCompositeType compositeType) + throw new InvalidOperationException("Composite mapping must be to a composite type"); + + return mapping.CreateInfo(options, new CompositeConverter( + ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, nameTranslator, options))); + }, isDefault: true); + + internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddStructArrayType(PgTypeName); + } + + internal abstract class EnumMapping( + string pgTypeName, + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] Type enumClrType, + INpgsqlNameTranslator nameTranslator) + : UserTypeMapping(pgTypeName, enumClrType) + { + internal INpgsqlNameTranslator NameTranslator { get; } = nameTranslator; + } + + sealed class EnumMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum> : EnumMapping + where TEnum : struct, Enum + { + readonly Dictionary _enumToLabel = new(); + readonly Dictionary _labelToEnum = new(); + + public EnumMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) + : base(pgTypeName, typeof(TEnum), nameTranslator) + { + foreach (var field in typeof(TEnum).GetFields(BindingFlags.Static | BindingFlags.Public)) + { + var attribute = (PgNameAttribute?)field.GetCustomAttribute(typeof(PgNameAttribute), false); + 
var enumName = attribute is null + ? nameTranslator.TranslateMemberName(field.Name) + : attribute.PgName; + var enumValue = (TEnum)field.GetValue(null)!; + + _enumToLabel[enumValue] = enumName; + _labelToEnum[enumName] = enumValue; + } + } + + internal override void AddMapping(TypeInfoMappingCollection mappings) + => mappings.AddStructType(PgTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new EnumConverter(_enumToLabel, _labelToEnum, options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + + internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddStructArrayType(PgTypeName); + } +} + diff --git a/src/Npgsql/UnpooledDataSource.cs b/src/Npgsql/UnpooledDataSource.cs index a1ff6659bd..55ce5d65af 100644 --- a/src/Npgsql/UnpooledDataSource.cs +++ b/src/Npgsql/UnpooledDataSource.cs @@ -1,19 +1,14 @@ using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; -using System.Transactions; using Npgsql.Internal; using Npgsql.Util; namespace Npgsql; -sealed class UnpooledDataSource : NpgsqlDataSource +sealed class UnpooledDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) + : NpgsqlDataSource(settings, dataSourceConfig, reportMetrics: true) { - public UnpooledDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) - : base(settings, dataSourceConfig) - { - } - volatile int _numConnectors; internal override (int Total, int Idle, int Busy) Statistics => (_numConnectors, 0, _numConnectors); @@ -26,7 +21,7 @@ internal override async ValueTask Get( CheckDisposed(); var connector = new NpgsqlConnector(this, conn); - await connector.Open(timeout, async, cancellationToken); + await connector.Open(timeout, async, cancellationToken).ConfigureAwait(false); Interlocked.Increment(ref _numConnectors); return connector; } @@ -47,14 +42,7 @@ internal override void Return(NpgsqlConnector connector) 
connector.Close(); } - internal override void Clear() {} - - internal override bool TryRentEnlistedPending(Transaction transaction, NpgsqlConnection connection, - [NotNullWhen(true)] out NpgsqlConnector? connector) + public override void Clear() { - connector = null; - return false; } - - internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) => false; -} \ No newline at end of file +} diff --git a/src/Npgsql/Util/GSSStream.cs b/src/Npgsql/Util/GSSStream.cs new file mode 100644 index 0000000000..b6f843f315 --- /dev/null +++ b/src/Npgsql/Util/GSSStream.cs @@ -0,0 +1,177 @@ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.IO; +using System.Net.Security; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Util; + +// For more detailed explanation of communication protocol +// See https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-GSSAPI +sealed class GSSStream : Stream +{ + // At most, postgres supports GSS messages up to 16kb + // We use the recommended value of 8kb for the write buffer + // Which will result in messages of slightly larger than 8kb + const int MaxWriteMessageSizeLimit = 8 * 1024; + const int MaxReadMessageSizeLimit = 16 * 1024; + + readonly Stream _stream; + readonly NegotiateAuthentication _authentication; + + readonly ArrayBufferWriter _writeBuffer; + readonly byte[] _writeLengthBuffer; + + readonly byte[] _readBuffer; + int _readPosition; + int _leftToRead; + + internal GSSStream(Stream stream, NegotiateAuthentication authentication) + { + _stream = stream; + _authentication = authentication; + // While we guarantee that unencrypted messages are at most 8kb + // Encrypting them will result in messages slightly larger than the original size + // Which is why the initial capacity has an additional 2kb of free space + _writeBuffer = new ArrayBufferWriter(MaxWriteMessageSizeLimit + 
2048); + _writeLengthBuffer = new byte[4]; + _readBuffer = new byte[MaxReadMessageSizeLimit]; + } + + public override void Write(ReadOnlySpan buffer) + { + var start = 0; + while (start != buffer.Length) + { + var lengthToWrite = Math.Min(buffer.Length - start, MaxWriteMessageSizeLimit); + var result = _authentication.Wrap( + buffer.Slice(start, lengthToWrite), + _writeBuffer, + _authentication.IsEncrypted, + out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while encrypting buffer: {result}"); + + var written = _writeBuffer.WrittenMemory; + Unsafe.WriteUnaligned(ref _writeLengthBuffer[0], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(written.Length) : written.Length); + + _stream.Write(_writeLengthBuffer); + _stream.Write(_writeBuffer.WrittenMemory.Span); + + _writeBuffer.ResetWrittenCount(); + start += lengthToWrite; + } + } + + public override void Write(byte[] buffer, int offset, int count) + => Write(buffer.AsSpan(offset, count)); + + public override async ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + { + var start = 0; + while (start != buffer.Length) + { + var lengthToWrite = Math.Min(buffer.Length - start, MaxWriteMessageSizeLimit); + var result = _authentication.Wrap( + buffer.Slice(start, lengthToWrite).Span, + _writeBuffer, + _authentication.IsEncrypted, + out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while encrypting buffer: {result}"); + + var written = _writeBuffer.WrittenMemory; + Unsafe.WriteUnaligned(ref _writeLengthBuffer[0], BitConverter.IsLittleEndian ? 
BinaryPrimitives.ReverseEndianness(written.Length) : written.Length); + + await _stream.WriteAsync(_writeLengthBuffer, cancellationToken).ConfigureAwait(false); + await _stream.WriteAsync(_writeBuffer.WrittenMemory, cancellationToken).ConfigureAwait(false); + + _writeBuffer.ResetWrittenCount(); + start += lengthToWrite; + } + } + + public override async Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await WriteAsync(buffer.AsMemory(offset, count), cancellationToken).ConfigureAwait(false); + + public override void Flush() => _stream.Flush(); + + public override Task FlushAsync(CancellationToken cancellationToken) => _stream.FlushAsync(cancellationToken); + + public override int Read(Span buffer) + { + if (_leftToRead == 0) + { + _stream.ReadExactly(_readBuffer.AsSpan(0, 4)); + var messageLength = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref _readBuffer[0])) + : Unsafe.ReadUnaligned(ref _readBuffer[0]); + var messageBuffer = _readBuffer.AsSpan(0, messageLength); + _stream.ReadExactly(messageBuffer); + var result = _authentication.UnwrapInPlace(messageBuffer, out _readPosition, out _leftToRead, out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while decrypting buffer: {result}"); + } + + var maxRead = Math.Min(_leftToRead, buffer.Length); + _readBuffer.AsSpan(_readPosition, maxRead).CopyTo(buffer); + _readPosition += maxRead; + _leftToRead -= maxRead; + return maxRead; + } + + public override int Read(byte[] buffer, int offset, int count) + => Read(buffer.AsSpan(offset, count)); + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + if (_leftToRead == 0) + { + await _stream.ReadExactlyAsync(_readBuffer.AsMemory(0, 4), cancellationToken).ConfigureAwait(false); + var messageLength = BitConverter.IsLittleEndian + ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref _readBuffer[0])) + : Unsafe.ReadUnaligned(ref _readBuffer[0]); + var messageBuffer = _readBuffer.AsMemory(0, messageLength); + await _stream.ReadExactlyAsync(messageBuffer, cancellationToken).ConfigureAwait(false); + var result = _authentication.UnwrapInPlace(messageBuffer.Span, out _readPosition, out _leftToRead, out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while decrypting buffer: {result}"); + } + + var maxRead = Math.Min(_leftToRead, buffer.Length); + _readBuffer.AsMemory(_readPosition, maxRead).CopyTo(buffer); + _readPosition += maxRead; + _leftToRead -= maxRead; + return maxRead; + } + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await ReadAsync(buffer.AsMemory(offset, count), cancellationToken).ConfigureAwait(false); + + public override void Close() => _stream.Close(); + + protected override void Dispose(bool disposing) + { + _authentication.Dispose(); + _stream.Dispose(); + } + + public override ValueTask DisposeAsync() => _stream.DisposeAsync(); + + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); + + public override bool CanRead => true; + public override bool CanWrite => true; + public override bool CanSeek => false; + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } +} diff --git a/src/Npgsql/Util/IterationIndices.cs b/src/Npgsql/Util/IterationIndices.cs new file mode 100644 index 0000000000..943b22fe1d --- /dev/null +++ b/src/Npgsql/Util/IterationIndices.cs @@ -0,0 +1,111 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using 
System.Runtime.CompilerServices; + +namespace Npgsql.Util; + +// Many array cannot be pooled until https://github.com/dotnet/runtime/issues/125325 is addressed. +struct IterationIndices +{ + long _indicesSum; + + public long IndicesSum => _indicesSum; + + public int Rank { get; private init; } + public int One => (int)_indicesSum; + public int[]? Many { get; private init; } + public int Last => Many is null ? (int)_indicesSum : Many[^1]; + + // Also accept the count for the most common case where we have a single dimension array to avoid the bounds check. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryAdvance(int lastCount, ReadOnlySpan counts) + { + Debug.Assert(counts.IsEmpty || lastCount == counts[^1]); + + ref var lastIndex = ref Many is null ? ref GetIntRefFromLong(ref _indicesSum) : ref Many![^1]; + + if (lastIndex < lastCount - 1) + { + lastIndex++; + // For Rank > 1 _indicesSum is a linear element counter independent from Many[^1], so we need to advance it explicitly. + if (Many is not null) + _indicesSum++; + return true; + } + + return Many is not null && IncrementOrCarry(counts); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + bool IncrementOrCarry(ReadOnlySpan counts) + { + Debug.Assert(counts.Length > 1); + Debug.Assert(Rank > 1); + + // Find the first dimension from the end that isn't at or past its length, increment it and bring all previous dimensions to zero. + for (var dim = Rank - 1; dim >= 0; dim--) + { + if (this[dim] >= counts[dim] - 1) + continue; + + Many.AsSpan().Slice(dim + 1).Clear(); + this[dim]++; + _indicesSum++; + return true; + } + + // We're done if we can't find any dimension that isn't at its length. 
+ return false; + } + + public ref int this[int index] + { + [UnscopedRef] + get + { + switch (Rank) + { + case 0: + ThrowHelper.ThrowIndexOutOfRangeException("Cannot index into a 0-dimensional array."); + return ref Unsafe.NullRef(); + case 1: + Debug.Assert(index is 0); + Debug.Assert(Many is null); + return ref GetIntRefFromLong(ref _indicesSum); + default: + return ref Many![index]; + } + } + } + + public void Reset() + { + _indicesSum = 0; + if (Many is not null) + Array.Clear(Many); + } + + public static IterationIndices Create(int dimensions) + { + switch (dimensions) + { + case 0: + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dimensions), "Cannot create a 0-dimensional array."); + return default; + case 1: + return new() { Rank = dimensions }; + default: + return new() + { + Rank = dimensions, + Many = new int[dimensions], + }; + } + } + + static ref int GetIntRefFromLong(ref long value) + => ref BitConverter.IsLittleEndian + ? ref Unsafe.As(ref value) + : ref Unsafe.Add(ref Unsafe.As(ref value), 1); // Take high 32 bits. +} diff --git a/src/Npgsql/Util/LoggingEnumerable.cs b/src/Npgsql/Util/LoggingEnumerable.cs new file mode 100644 index 0000000000..4b36ce19a6 --- /dev/null +++ b/src/Npgsql/Util/LoggingEnumerable.cs @@ -0,0 +1,36 @@ +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Npgsql.Util; + +// For logging batches we have to use a wrapper for parameters, otherwise they're logged as object[]. See https://github.com/npgsql/npgsql/issues/6078. 
+sealed class LoggingEnumerable(IEnumerable wrappedEnumerable) : IEnumerable +{ + public IEnumerator GetEnumerator() => wrappedEnumerable.GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => ((IEnumerable)wrappedEnumerable).GetEnumerator(); + + public override string ToString() + { + var sb = new StringBuilder(); + + sb.Append('['); + + var appended = false; + + foreach (var o in wrappedEnumerable) + { + if (appended) + sb.Append(", "); + else + appended = true; + + sb.Append(o); + } + + sb.Append(']'); + + return sb.ToString(); + } +} diff --git a/src/Npgsql/Util/ManualResetValueTaskSource.cs b/src/Npgsql/Util/ManualResetValueTaskSource.cs deleted file mode 100644 index 55e45aa225..0000000000 --- a/src/Npgsql/Util/ManualResetValueTaskSource.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using System.Threading.Tasks.Sources; - -namespace Npgsql.Util; - -sealed class ManualResetValueTaskSource : IValueTaskSource, IValueTaskSource -{ - ManualResetValueTaskSourceCore _core; // mutable struct; do not make this readonly - - public bool RunContinuationsAsynchronously { get => _core.RunContinuationsAsynchronously; set => _core.RunContinuationsAsynchronously = value; } - public short Version => _core.Version; - public void Reset() => _core.Reset(); - public void SetResult(T result) => _core.SetResult(result); - public void SetException(Exception error) => _core.SetException(error); - - public T GetResult(short token) => _core.GetResult(token); - void IValueTaskSource.GetResult(short token) => _core.GetResult(token); - public ValueTaskSourceStatus GetStatus(short token) => _core.GetStatus(token); - public void OnCompleted(Action continuation, object? 
state, short token, ValueTaskSourceOnCompletedFlags flags) - => _core.OnCompleted(continuation, state, token, flags); -} \ No newline at end of file diff --git a/src/Npgsql/Util/NpgsqlTimeout.cs b/src/Npgsql/Util/NpgsqlTimeout.cs new file mode 100644 index 0000000000..79c44d6c4b --- /dev/null +++ b/src/Npgsql/Util/NpgsqlTimeout.cs @@ -0,0 +1,52 @@ +using System; +using System.Threading; +using Npgsql.Internal; + +namespace Npgsql.Util; + +/// +/// Represents a timeout that will expire at some point. +/// +public readonly struct NpgsqlTimeout +{ + readonly DateTime _expiration; + + internal static readonly NpgsqlTimeout Infinite = new(TimeSpan.Zero); + + internal NpgsqlTimeout(TimeSpan expiration) + => _expiration = expiration > TimeSpan.Zero + ? DateTime.UtcNow + expiration + : expiration == TimeSpan.Zero + ? DateTime.MaxValue + : DateTime.MinValue; + + internal void Check() + { + if (HasExpired) + ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); + } + + internal void CheckAndApply(NpgsqlConnector connector) + { + if (!IsSet) + return; + + var timeLeft = CheckAndGetTimeLeft(); + // Set the remaining timeout on the read and write buffers + connector.ReadBuffer.Timeout = connector.WriteBuffer.Timeout = timeLeft; + } + + internal bool IsSet => _expiration != DateTime.MaxValue; + + internal bool HasExpired => DateTime.UtcNow >= _expiration; + + internal TimeSpan CheckAndGetTimeLeft() + { + if (!IsSet) + return Timeout.InfiniteTimeSpan; + var timeLeft = _expiration - DateTime.UtcNow; + if (timeLeft <= TimeSpan.Zero) + Check(); + return timeLeft; + } +} diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs deleted file mode 100644 index b3746f2a65..0000000000 --- a/src/Npgsql/Util/PGUtil.cs +++ /dev/null @@ -1,232 +0,0 @@ -using Npgsql.Internal; -using System; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Text; 
-using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql.Util; - -static class Statics -{ -#if DEBUG - internal static bool LegacyTimestampBehavior; - internal static bool DisableDateTimeInfinityConversions; -#else - internal static readonly bool LegacyTimestampBehavior; - internal static readonly bool DisableDateTimeInfinityConversions; -#endif - - static Statics() - { - LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; - DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; - } - - internal static T Expect(IBackendMessage msg, NpgsqlConnector connector) - { - if (msg.GetType() != typeof(T)) - ThrowIfMsgWrongType(msg, connector); - - return (T)msg; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static T ExpectAny(IBackendMessage msg, NpgsqlConnector connector) - { - if (msg is T t) - return t; - - ThrowIfMsgWrongType(msg, connector); - return default; - } - - [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] - static void ThrowIfMsgWrongType(IBackendMessage msg, NpgsqlConnector connector) - => throw connector.Break( - new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. 
Please file a bug.")); - - internal static DeferDisposable Defer(Action action) => new(action); - internal static DeferDisposable Defer(Action action, T arg) => new(action, arg); - internal static DeferDisposable Defer(Action action, T1 arg1, T2 arg2) => new(action, arg1, arg2); - // internal static AsyncDeferDisposable DeferAsync(Func func) => new AsyncDeferDisposable(func); - internal static AsyncDeferDisposable DeferAsync(Func func) => new(func); - - internal readonly struct DeferDisposable : IDisposable - { - readonly Action _action; - public DeferDisposable(Action action) => _action = action; - public void Dispose() => _action(); - } - - internal readonly struct DeferDisposable : IDisposable - { - readonly Action _action; - readonly T _arg; - public DeferDisposable(Action action, T arg) - { - _action = action; - _arg = arg; - } - public void Dispose() => _action(_arg); - } - - internal readonly struct DeferDisposable : IDisposable - { - readonly Action _action; - readonly T1 _arg1; - readonly T2 _arg2; - public DeferDisposable(Action action, T1 arg1, T2 arg2) - { - _action = action; - _arg1 = arg1; - _arg2 = arg2; - } - public void Dispose() => _action(_arg1, _arg2); - } - - internal readonly struct AsyncDeferDisposable : IAsyncDisposable - { - readonly Func _func; - public AsyncDeferDisposable(Func func) => _func = func; - public async ValueTask DisposeAsync() => await _func(); - } -} - -// ReSharper disable once InconsistentNaming -static class PGUtil -{ - internal static readonly UTF8Encoding UTF8Encoding = new(false, true); - internal static readonly UTF8Encoding RelaxedUTF8Encoding = new(false, false); - - internal const int BitsInInt = sizeof(int) * 8; - - internal static void ValidateBackendMessageCode(BackendMessageCode code) - { - switch (code) - { - case BackendMessageCode.AuthenticationRequest: - case BackendMessageCode.BackendKeyData: - case BackendMessageCode.BindComplete: - case BackendMessageCode.CloseComplete: - case 
BackendMessageCode.CommandComplete: - case BackendMessageCode.CopyData: - case BackendMessageCode.CopyDone: - case BackendMessageCode.CopyBothResponse: - case BackendMessageCode.CopyInResponse: - case BackendMessageCode.CopyOutResponse: - case BackendMessageCode.DataRow: - case BackendMessageCode.EmptyQueryResponse: - case BackendMessageCode.ErrorResponse: - case BackendMessageCode.FunctionCall: - case BackendMessageCode.FunctionCallResponse: - case BackendMessageCode.NoData: - case BackendMessageCode.NoticeResponse: - case BackendMessageCode.NotificationResponse: - case BackendMessageCode.ParameterDescription: - case BackendMessageCode.ParameterStatus: - case BackendMessageCode.ParseComplete: - case BackendMessageCode.PasswordPacket: - case BackendMessageCode.PortalSuspended: - case BackendMessageCode.ReadyForQuery: - case BackendMessageCode.RowDescription: - return; - default: - throw new NpgsqlException("Unknown message code: " + code); - } - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static int RotateShift(int val, int shift) - => (val << shift) | (val >> (BitsInInt - shift)); - - internal static readonly Task TrueTask = Task.FromResult(true); - internal static readonly Task FalseTask = Task.FromResult(false); -} - -enum FormatCode : short -{ - Text = 0, - Binary = 1 -} - -static class EnumerableExtensions -{ - internal static string Join(this IEnumerable values, string separator) - { - return string.Join(separator, values); - } -} - -static class ExceptionExtensions -{ - internal static Exception UnwrapAggregate(this Exception exception) - => exception is AggregateException agg ? agg.InnerException! : exception; -} - -/// -/// Represents a timeout that will expire at some point. -/// -public readonly struct NpgsqlTimeout -{ - readonly DateTime _expiration; - - internal static NpgsqlTimeout Infinite = new(TimeSpan.Zero); - - internal NpgsqlTimeout(TimeSpan expiration) - => _expiration = expiration > TimeSpan.Zero - ? 
DateTime.UtcNow + expiration - : expiration == TimeSpan.Zero - ? DateTime.MaxValue - : DateTime.MinValue; - - internal void Check() - { - if (HasExpired) - throw new TimeoutException(); - } - - internal void CheckAndApply(NpgsqlConnector connector) - { - if (!IsSet) - return; - - var timeLeft = CheckAndGetTimeLeft(); - // Set the remaining timeout on the read and write buffers - connector.ReadBuffer.Timeout = connector.WriteBuffer.Timeout = timeLeft; - - // Note that we set UserTimeout as well, otherwise the read timeout will get overwritten in ReadMessage - // Note also that we must set the read buffer's timeout directly (above), since the SSL handshake - // reads data directly from the buffer, without going through ReadMessage. - connector.UserTimeout = (int) Math.Ceiling(timeLeft.TotalMilliseconds); - } - - internal bool IsSet => _expiration != DateTime.MaxValue; - - internal bool HasExpired => DateTime.UtcNow >= _expiration; - - internal TimeSpan CheckAndGetTimeLeft() - { - if (!IsSet) - return Timeout.InfiniteTimeSpan; - var timeLeft = _expiration - DateTime.UtcNow; - if (timeLeft <= TimeSpan.Zero) - Check(); - return timeLeft; - } -} - -static class MethodInfos -{ - internal static readonly ConstructorInfo InvalidCastExceptionCtor = - typeof(InvalidCastException).GetConstructor(new[] { typeof(string) })!; - - internal static readonly MethodInfo StringFormat = - typeof(string).GetMethod(nameof(string.Format), new[] { typeof(string), typeof(object) })!; - - internal static readonly MethodInfo ObjectGetType = - typeof(object).GetMethod(nameof(GetType), new Type[0])!; -} \ No newline at end of file diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index c61a07b99e..3218a7e629 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.Threading; 
using static System.Threading.Timeout; @@ -13,17 +13,17 @@ namespace Npgsql.Util; /// we need to make sure that an existing cancellation token source hasn't been cancelled, /// every time we start it (see https://github.com/dotnet/runtime/issues/4694). /// -sealed class ResettableCancellationTokenSource : IDisposable +sealed class ResettableCancellationTokenSource(TimeSpan timeout) : IDisposable { bool isDisposed; - public TimeSpan Timeout { get; set; } + public TimeSpan Timeout { get; set; } = timeout; CancellationTokenSource _cts = new(); - CancellationTokenRegistration _registration; + CancellationTokenRegistration? _registration; /// - /// Used, so we wouldn't concurently use the cts for the cancellation, while it's being disposed + /// Used, so we wouldn't concurrently use the cts for the cancellation, while it's being disposed /// readonly object lockObject = new(); @@ -31,9 +31,9 @@ sealed class ResettableCancellationTokenSource : IDisposable bool _isRunning; #endif - public ResettableCancellationTokenSource() => Timeout = InfiniteTimeSpan; - - public ResettableCancellationTokenSource(TimeSpan timeout) => Timeout = timeout; + public ResettableCancellationTokenSource() : this(InfiniteTimeSpan) + { + } /// /// Set the timeout on the wrapped @@ -48,10 +48,21 @@ public CancellationToken Start(CancellationToken cancellationToken = default) #if DEBUG Debug.Assert(!_isRunning); #endif - _cts.CancelAfter(Timeout); - if (_cts.IsCancellationRequested) + lock (lockObject) { - lock (lockObject) + // if there was an attempt to cancel while the connector was breaking + // we do nothing and return the default token + // as we're going to fail while reading or writing anyway + if (isDisposed) + { +#if DEBUG + _isRunning = true; +#endif + return CancellationToken.None; + } + + _cts.CancelAfter(Timeout); + if (_cts.IsCancellationRequested) { _cts.Dispose(); _cts = new CancellationTokenSource(Timeout); @@ -69,31 +80,48 @@ public CancellationToken Start(CancellationToken 
cancellationToken = default) /// Restart the timeout on the wrapped without reinitializing it, /// even if is already set to /// - public void RestartTimeoutWithoutReset() => _cts.CancelAfter(Timeout); + public void RestartTimeoutWithoutReset() + { + lock (lockObject) + { + // if there was an attempt to cancel while the connector was breaking + // we do nothing and return the default token + // as we're going to fail while reading or writing anyway + if (!isDisposed) + _cts.CancelAfter(Timeout); + } + } /// /// Reset the wrapper to contain a unstarted and uncancelled /// in order make sure the next call to will not invalidate /// the cancellation token. /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// /// The from the wrapped - public CancellationToken Reset(CancellationToken cancellationToken = default) + public CancellationToken Reset() { - _registration.Dispose(); - _cts.CancelAfter(InfiniteTimeSpan); - if (_cts.IsCancellationRequested) + _registration?.Dispose(); + _registration = null; + lock (lockObject) { - lock (lockObject) + // if there was an attempt to cancel while the connector was breaking + // we do nothing and return + // as we're going to fail while reading or writing anyway + if (isDisposed) + { +#if DEBUG + _isRunning = false; +#endif + return CancellationToken.None; + } + + _cts.CancelAfter(InfiniteTimeSpan); + if (_cts.IsCancellationRequested) { _cts.Dispose(); _cts = new CancellationTokenSource(); } } - if (cancellationToken.CanBeCanceled) - _registration = cancellationToken.Register(cts => ((CancellationTokenSource)cts!).Cancel(), _cts); #if DEBUG _isRunning = false; #endif @@ -128,8 +156,15 @@ public void ResetCts() /// public void Stop() { - _registration.Dispose(); - _cts.CancelAfter(InfiniteTimeSpan); + _registration?.Dispose(); + _registration = null; + lock (lockObject) + { + // if there was an attempt to cancel while the connector was breaking + // we do nothing + if (!isDisposed) + 
_cts.CancelAfter(InfiniteTimeSpan); + } #if DEBUG _isRunning = false; #endif @@ -186,10 +221,10 @@ public void Dispose() lock (lockObject) { - _registration.Dispose(); + _registration?.Dispose(); _cts.Dispose(); isDisposed = true; } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Util/Statics.cs b/src/Npgsql/Util/Statics.cs new file mode 100644 index 0000000000..c21d10fbe5 --- /dev/null +++ b/src/Npgsql/Util/Statics.cs @@ -0,0 +1,100 @@ +using Npgsql.Internal; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Text; + +namespace Npgsql.Util; + +static class Statics +{ + internal static readonly bool EnableAssertions; +#if DEBUG + internal static bool LegacyTimestampBehavior; + internal static bool DisableDateTimeInfinityConversions; +#else + internal static readonly bool LegacyTimestampBehavior; + internal static readonly bool DisableDateTimeInfinityConversions; +#endif + + static Statics() + { + EnableAssertions = AppContext.TryGetSwitch("Npgsql.EnableAssertions", out var enabled) && enabled; + LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out enabled) && enabled; + DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; + } + + /// Returns the escaped SQL representation of a string literal. + /// The identifier to be escaped. + internal static string EscapeLiteral(string literal) + { + // There is no support for escape sequences in quoted values for PostgreSQL, so replacing ' is enough. 
+ // (to be able to use escaped characters an alternative syntax exists, it requires E to appear directly before the opening quote) + return literal.Replace("'", "''"); + } + + internal static T Expect(IBackendMessage msg, NpgsqlConnector connector) + { + if (msg.GetType() != typeof(T)) + ThrowIfMsgWrongType(msg, connector); + + return (T)msg; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static T ExpectAny(IBackendMessage msg, NpgsqlConnector connector) + { + if (msg is T t) + return t; + + ThrowIfMsgWrongType(msg, connector); + return default; + } + + [DoesNotReturn] + static void ThrowIfMsgWrongType(IBackendMessage msg, NpgsqlConnector connector) + => throw connector.Break( + new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. Please file a bug.")); + + [Conditional("DEBUG")] + internal static void ValidateBackendMessageCode(BackendMessageCode code) + { + switch (code) + { + case BackendMessageCode.AuthenticationRequest: + case BackendMessageCode.BackendKeyData: + case BackendMessageCode.BindComplete: + case BackendMessageCode.CloseComplete: + case BackendMessageCode.CommandComplete: + case BackendMessageCode.CopyData: + case BackendMessageCode.CopyDone: + case BackendMessageCode.CopyBothResponse: + case BackendMessageCode.CopyInResponse: + case BackendMessageCode.CopyOutResponse: + case BackendMessageCode.DataRow: + case BackendMessageCode.EmptyQueryResponse: + case BackendMessageCode.ErrorResponse: + case BackendMessageCode.FunctionCall: + case BackendMessageCode.FunctionCallResponse: + case BackendMessageCode.NoData: + case BackendMessageCode.NoticeResponse: + case BackendMessageCode.NotificationResponse: + case BackendMessageCode.ParameterDescription: + case BackendMessageCode.ParameterStatus: + case BackendMessageCode.ParseComplete: + case BackendMessageCode.PasswordPacket: + case BackendMessageCode.PortalSuspended: + case BackendMessageCode.ReadyForQuery: + case 
BackendMessageCode.RowDescription: + return; + default: + ThrowUnknownMessageCode(code); + return; + } + + static void ThrowUnknownMessageCode(BackendMessageCode code) + => ThrowHelper.ThrowNpgsqlException($"Unknown message code: {code}"); + } +} diff --git a/src/Npgsql/Util/StrongBox.cs b/src/Npgsql/Util/StrongBox.cs new file mode 100644 index 0000000000..d72c3140e0 --- /dev/null +++ b/src/Npgsql/Util/StrongBox.cs @@ -0,0 +1,41 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Util; + +abstract class StrongBox +{ + private protected StrongBox() { } + public abstract bool HasValue { get; } + public abstract object? Value { get; set; } + public abstract void Clear(); +} + +sealed class StrongBox : StrongBox +{ + bool _hasValue; + + [MaybeNull] T _typedValue; + [MaybeNull] + public T TypedValue { + get => _typedValue; + set + { + _hasValue = true; + _typedValue = value; + } + } + + public override bool HasValue => _hasValue; + + public override object? Value + { + get => TypedValue; + set => TypedValue = (T)value!; + } + + public override void Clear() + { + _hasValue = false; + TypedValue = default!; + } +} diff --git a/src/Npgsql/Util/SubReadStream.cs b/src/Npgsql/Util/SubReadStream.cs new file mode 100644 index 0000000000..d5eb760d2f --- /dev/null +++ b/src/Npgsql/Util/SubReadStream.cs @@ -0,0 +1,211 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Util; + +// Adapted from https://github.com/dotnet/runtime/blob/83adfae6a6273d8fb4c69554aa3b1cc7cbf01c71/src/libraries/System.IO.Compression/src/System/IO/Compression/ZipCustomStreams.cs#L221 +sealed class SubReadStream : Stream +{ + readonly long _start; + long _position; + readonly long _end; + readonly Stream? 
_stream; + readonly ArraySegment _buffer; + readonly bool _canSeek; + bool _isDisposed; + internal bool IsDisposed => _isDisposed; + + public SubReadStream(Stream source, long maxLength) + { + _start = -1; + _position = 0; + _end = maxLength; + _stream = source; + _canSeek = false; + } + + public SubReadStream(Stream source, long startPosition, long maxLength) + { + _start = startPosition; + _position = startPosition; + _end = startPosition + maxLength; + _stream = source; + _canSeek = source.CanSeek; + } + + public SubReadStream(byte[] buffer, int offset, int count) + { + _buffer = new ArraySegment(buffer, offset, count); + _start = 0; + _position = 0; + _end = count; + _canSeek = true; + } + + public override long Length + { + get + { + ThrowIfDisposed(); + + if (!_canSeek) + throw new NotSupportedException(); + + return _end - _start; + } + } + + public override long Position + { + get + { + ThrowIfDisposed(); + + if (!_canSeek) + throw new NotSupportedException(); + + return _position - _start; + } + set + { + ThrowIfDisposed(); + + if (!_canSeek) + throw new NotSupportedException(); + + ArgumentOutOfRangeException.ThrowIfNegative(value); + _position = _start + value; + } + } + + public override bool CanRead => _buffer.Array is not null || _stream!.CanRead; + + public override bool CanSeek => _canSeek; + + public override bool CanWrite => false; + + void ThrowIfDisposed() + => ObjectDisposedException.ThrowIf(_isDisposed, this); + + void ThrowIfCantRead() + { + if (!CanRead) + throw new NotSupportedException(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + ValidateBufferArguments(buffer, offset, count); + return Read(new Span(buffer, offset, count)); + } + + public override int Read(Span destination) + { + ThrowIfDisposed(); + + var count = destination.Length; + if (_position + count > _end) + count = (int)(_end - _position); + + if (count <= 0) + return 0; + + if (_buffer.Array is not null) + { + _buffer.AsSpan((int)_position, 
count).CopyTo(destination); + _position += count; + return count; + } + + ThrowIfCantRead(); + if (_canSeek && _stream!.Position != _position) + _stream.Seek(_position, SeekOrigin.Begin); + + var ret = _stream!.Read(destination.Slice(0, count)); + _position += ret; + return ret; + } + + public override int ReadByte() + { + Span b = stackalloc byte[1]; + return Read(b) == 1 ? b[0] : -1; + } + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ValidateBufferArguments(buffer, offset, count); + return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + } + + public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + ThrowIfDisposed(); + + if (_buffer.Array is not null) + return new(Read(buffer.Span)); + + ThrowIfCantRead(); + if (_canSeek && _stream!.Position != _position) + _stream.Seek(_position, SeekOrigin.Begin); + + if (_position > _end - buffer.Length) + buffer = buffer.Slice(0, (int)(_end - _position)); + + return Core(buffer, cancellationToken); + + async ValueTask Core(Memory buffer, CancellationToken cancellationToken) + { + var ret = await _stream!.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _position += ret; + return ret; + } + } + + public override long Seek(long offset, SeekOrigin origin) + { + ThrowIfDisposed(); + + if (!_canSeek) + throw new NotSupportedException(); + + var newPosition = origin switch + { + SeekOrigin.Begin => _start + offset, + SeekOrigin.Current => _position + offset, + SeekOrigin.End => _end + offset, + _ => throw new ArgumentOutOfRangeException(nameof(origin)) + }; + + if (newPosition < _start) + throw new IOException("An attempt was made to move the position before the beginning of the stream."); + + _position = newPosition; + return _position - _start; + } + + public override void SetLength(long value) + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + public 
override void Write(byte[] buffer, int offset, int count) + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + public override void Flush() => ThrowIfDisposed(); + + protected override void Dispose(bool disposing) + { + if (disposing && !_isDisposed) + { + _isDisposed = true; + } + base.Dispose(disposing); + } +} diff --git a/src/Npgsql/Util/TaskSchedulerAwaitable.cs b/src/Npgsql/Util/TaskSchedulerAwaitable.cs new file mode 100644 index 0000000000..1b6d2c5647 --- /dev/null +++ b/src/Npgsql/Util/TaskSchedulerAwaitable.cs @@ -0,0 +1,35 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Util; + +readonly struct TaskSchedulerAwaitable(TaskScheduler scheduler) : ICriticalNotifyCompletion +{ + public void GetResult() {} + public bool IsCompleted => false; + + public void OnCompleted(Action continuation) + { + var task = Task.Factory.StartNew(continuation, CancellationToken.None, + TaskCreationOptions.DenyChildAttach, + scheduler: scheduler); + + // Exceptions should never happen as the continuation should be the async statemachine. + // It normally does its own error handling through the returned task unless it's an async void returning method. + // In which case we should absolutely let it bubble up to TaskScheduler.UnobservedTaskException. 
+ OnFaulted(task); + + [Conditional("DEBUG")] + static void OnFaulted(Task task) + { + task.ContinueWith(t => Debug.Fail("Task scheduler task threw an unobserved exception"), TaskContinuationOptions.OnlyOnFaulted); + } + } + + public void UnsafeOnCompleted(Action continuation) => OnCompleted(continuation); + + public TaskSchedulerAwaitable GetAwaiter() => this; +} diff --git a/src/Npgsql/Util/TypeExtensions.cs b/src/Npgsql/Util/TypeExtensions.cs new file mode 100644 index 0000000000..a8a298c419 --- /dev/null +++ b/src/Npgsql/Util/TypeExtensions.cs @@ -0,0 +1,22 @@ +using System; + +namespace Npgsql.Util; + +static class TypeExtensions +{ + extension(Type type) + { + /// + /// Determines whether this type and are in a subtype relationship, + /// i.e. whether one is assignable to the other in either direction. + /// + /// + /// Returns when the types are identical, when one inherits from or implements the other, + /// or more generally when an implicit reference or boxing conversion exists between them. + /// + /// The type to check the relationship with. + /// if either type is assignable to the other; otherwise, . 
+ public bool IsInSubtypeRelationshipWith(Type other) => + type.IsAssignableTo(other) || other.IsAssignableTo(type); + } +} diff --git a/src/Npgsql/Util/VersionExtensions.cs b/src/Npgsql/Util/VersionExtensions.cs index 4501dd78d2..d2fbd67dea 100644 --- a/src/Npgsql/Util/VersionExtensions.cs +++ b/src/Npgsql/Util/VersionExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; namespace Npgsql.Util; diff --git a/src/Npgsql/VolatileResourceManager.cs b/src/Npgsql/VolatileResourceManager.cs index 84c28868e3..92a716f2e2 100644 --- a/src/Npgsql/VolatileResourceManager.cs +++ b/src/Npgsql/VolatileResourceManager.cs @@ -1,5 +1,5 @@ using System; -using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Transactions; using Microsoft.Extensions.Logging; @@ -17,6 +17,7 @@ namespace Npgsql; sealed class VolatileResourceManager : ISinglePhaseNotification { NpgsqlConnector _connector; + NpgsqlDataSource _dataSource; Transaction _transaction; readonly string _txId; NpgsqlTransaction _localTx = null!; @@ -31,6 +32,7 @@ sealed class VolatileResourceManager : ISinglePhaseNotification internal VolatileResourceManager(NpgsqlConnection connection, Transaction transaction) { _connector = connection.Connector!; + _dataSource = connection.NpgsqlDataSource; _transaction = transaction; // _tx gets disposed by System.Transactions at some point, but we want to be able to log its local ID _txId = transaction.TransactionInformation.LocalIdentifier; @@ -96,6 +98,8 @@ public void Prepare(PreparingEnlistment preparingEnlistment) } } + [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "Changing Enlist to be false does not affect potentially trimmed out functionality.")] + [UnconditionalSuppressMessage("Aot", "IL3050", Justification = "Changing Enlist to be false does not cause dynamic codegen.")] public void Commit(Enlistment enlistment) { CheckDisposed(); @@ -121,7 +125,11 @@ public void Commit(Enlistment enlistment) // if the user 
continues to use their connection after disposing the scope, and the MSDTC // requests a commit at that exact time. // To avoid this, we open a new connection for performing the 2nd phase. - using var conn2 = (NpgsqlConnection)((ICloneable)_connector.Connection).Clone(); + var settings = _connector.Connection.Settings.Clone(); + // Set Enlist to false because we might be in TransactionScope and we can't prepare transaction while being in an open transaction + // see #5246 + settings.Enlist = false; + using var conn2 = _connector.Connection.CloneWith(settings.ConnectionString); conn2.Open(); var connector = conn2.Connector!; @@ -271,8 +279,10 @@ void Dispose() { // We're here for connections which were closed before their TransactionScope completes. // These need to be closed now. - // We should return the connector to the pool only if we've successfully removed it from the pending list - if (_connector.TryRemovePendingEnlistedConnector(_transaction)) + // We should return the connector to the pool only if we've successfully removed it from the pending list. + // Note that we remove it from the NpgsqlDataSource bound to connection and not to connector + // because of NpgsqlMultiHostDataSource which has its own list to which connection adds connectors. 
+ if (_dataSource.TryRemovePendingEnlistedConnector(_connector, _transaction)) _connector.Return(); } @@ -283,10 +293,7 @@ void Dispose() #pragma warning restore CS8625 void CheckDisposed() - { - if (_isDisposed) - throw new ObjectDisposedException(nameof(VolatileResourceManager)); - } + => ObjectDisposedException.ThrowIf(_isDisposed, this); #endregion @@ -301,4 +308,4 @@ static System.Data.IsolationLevel ConvertIsolationLevel(IsolationLevel isolation IsolationLevel.Snapshot => System.Data.IsolationLevel.Snapshot, _ => System.Data.IsolationLevel.Unspecified }; -} \ No newline at end of file +} diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs deleted file mode 100644 index 594d5bb5d0..0000000000 --- a/src/Shared/CodeAnalysis.cs +++ /dev/null @@ -1,175 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; - -#pragma warning disable 1591 - -namespace System.Diagnostics.CodeAnalysis -{ -#if NETSTANDARD2_0 - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property)] - sealed class AllowNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property)] - sealed class DisallowNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Method)] - sealed class DoesNotReturnAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Parameter)] - sealed class DoesNotReturnIfAttribute : Attribute - { - public DoesNotReturnIfAttribute(bool parameterValue) => ParameterValue = parameterValue; - public bool ParameterValue { get; } - } - - [AttributeUsageAttribute(AttributeTargets.Assembly | AttributeTargets.Class | AttributeTargets.Constructor | AttributeTargets.Event | AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Struct, AllowMultiple = false)] - sealed class ExcludeFromCodeCoverageAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Field | 
AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue)] - sealed class MaybeNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Parameter)] - sealed class MaybeNullWhenAttribute : Attribute - { - public MaybeNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; - public bool ReturnValue { get; } - } - - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue)] - sealed class NotNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, AllowMultiple = true)] - sealed class NotNullIfNotNullAttribute : Attribute - { - public NotNullIfNotNullAttribute(string parameterName) => ParameterName = parameterName; - public string ParameterName { get; } - } - - [AttributeUsageAttribute(AttributeTargets.Parameter)] - sealed class NotNullWhenAttribute : Attribute - { - public NotNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; - public bool ReturnValue { get; } - } -#endif - -#if !NET5_0_OR_GREATER - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, AllowMultiple = true, Inherited = false)] - sealed class MemberNotNullAttribute : Attribute - { - public MemberNotNullAttribute(string member) => Members = new string[] - { - member - }; - - public MemberNotNullAttribute(params string[] members) => Members = members; - - public string[] Members { get; } - } - - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, AllowMultiple = true, Inherited = false)] - sealed class MemberNotNullWhenAttribute : Attribute - { - public MemberNotNullWhenAttribute(bool returnValue, string member) - { - ReturnValue = returnValue; - Members = new string[1] { member }; - } - - public MemberNotNullWhenAttribute(bool returnValue, params string[] members) - { - ReturnValue = returnValue; - Members = members; - } - - 
public bool ReturnValue { get; } - - public string[] Members { get; } - } - - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class, Inherited = false)] - sealed class RequiresUnreferencedCodeAttribute : Attribute - { - public RequiresUnreferencedCodeAttribute(string message) - { - Message = message; - } - - public string Message { get; } - - public string? Url { get; set; } - } - - [AttributeUsage( - AttributeTargets.Field | AttributeTargets.ReturnValue | AttributeTargets.GenericParameter | - AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.Method | - AttributeTargets.Class | AttributeTargets.Interface | AttributeTargets.Struct, - Inherited = false)] - sealed class DynamicallyAccessedMembersAttribute : Attribute - { - public DynamicallyAccessedMembersAttribute(DynamicallyAccessedMemberTypes memberTypes) - { - MemberTypes = memberTypes; - } - - public DynamicallyAccessedMemberTypes MemberTypes { get; } - } - - [Flags] - enum DynamicallyAccessedMemberTypes - { - None = 0, - PublicParameterlessConstructor = 0x0001, - PublicConstructors = 0x0002 | PublicParameterlessConstructor, - NonPublicConstructors = 0x0004, - PublicMethods = 0x0008, - NonPublicMethods = 0x0010, - PublicFields = 0x0020, - NonPublicFields = 0x0040, - PublicNestedTypes = 0x0080, - NonPublicNestedTypes = 0x0100, - PublicProperties = 0x0200, - NonPublicProperties = 0x0400, - PublicEvents = 0x0800, - NonPublicEvents = 0x1000, - Interfaces = 0x2000, - All = ~None - } - - [AttributeUsage(AttributeTargets.All, Inherited = false, AllowMultiple = true)] - sealed class UnconditionalSuppressMessageAttribute : Attribute - { - public UnconditionalSuppressMessageAttribute(string category, string checkId) - { - Category = category; - CheckId = checkId; - } - - public string Category { get; } - public string CheckId { get; } - public string? Scope { get; set; } - public string? Target { get; set; } - public string? 
MessageId { get; set; } - public string? Justification { get; set; } - } -#endif -} - -#if !NET5_0_OR_GREATER -namespace System.Runtime.CompilerServices -{ - internal static class IsExternalInit {} -} -#endif \ No newline at end of file diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 4aa685bc22..9625beb334 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -1,13 +1,13 @@ - + - net7.0;netcoreapp3.1 - net7.0 + net10.0 false - $(NoWarn);CA2252 + + $(NoWarn);CA2252;NPG9001 diff --git a/test/MStatDumper/MStatDumper.csproj b/test/MStatDumper/MStatDumper.csproj new file mode 100644 index 0000000000..6405431678 --- /dev/null +++ b/test/MStatDumper/MStatDumper.csproj @@ -0,0 +1,13 @@ + + + + Exe + enable + disable + + + + + + + diff --git a/test/MStatDumper/Program.cs b/test/MStatDumper/Program.cs new file mode 100644 index 0000000000..13f0b4ab3e --- /dev/null +++ b/test/MStatDumper/Program.cs @@ -0,0 +1,368 @@ +using Mono.Cecil; +using Mono.Cecil.Rocks; + +namespace MStatDumper +{ + internal class Program + { + static void Main(string[] args) + { + if (args.Length == 0) + { + throw new Exception("Must provide the path to mstat file. It's in {project}/obj/Release/{TFM}/{os}/native/{project}.mstat"); + } + + var markDownStyleOutput = args.Length > 1 && args[1] == "md"; + + var asm = AssemblyDefinition.ReadAssembly(args[0]); + var globalType = (TypeDefinition)asm.MainModule.LookupToken(0x02000001); + + var versionMajor = asm.Name.Version.Major; + + var types = globalType.Methods.First(x => x.Name == "Types"); + var typeStats = GetTypes(versionMajor, types).ToList(); + var typeSize = typeStats.Sum(x => x.Size); + var typesByModules = typeStats.GroupBy(x => x.Type.Scope).Select(x => new { x.Key.Name, Sum = x.Sum(x => x.Size) }).ToList(); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine($"Types Total Size {typeSize:n0}"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in typesByModules.OrderByDescending(x => x.Sum)) + { + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine($"// ********** Types Total Size {typeSize:n0}"); + foreach (var m in typesByModules.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"{m.Name,-70} {m.Sum,9:n0}"); + } + Console.WriteLine("// **********"); + } + + Console.WriteLine(); + + var methods = globalType.Methods.First(x => x.Name == "Methods"); + var methodStats = GetMethods(versionMajor, methods).ToList(); + var methodSize = methodStats.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize); + var methodsByModules = methodStats.GroupBy(x => x.Method.DeclaringType.Scope).Select(x => new { x.Key.Name, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize) }).ToList(); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine($"Methods Total Size {methodSize:n0}"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in methodsByModules.OrderByDescending(x => x.Sum)) + { + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine($"// ********** Methods Total Size {methodSize:n0}"); + foreach (var m in methodsByModules.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"{m.Name,-70} {m.Sum,9:n0}"); + } + Console.WriteLine("// **********"); + } + + Console.WriteLine(); + + string FindNamespace(TypeReference type) + { + var current = type; + while (true) + { + if (!string.IsNullOrEmpty(current.Namespace)) + { + return current.Namespace; + } + + if (current.DeclaringType == null) + { + return current.Name; + } + + current = current.DeclaringType; + } + } + + var methodsByNamespace = methodStats.Select(x => new TypeStats { Type = x.Method.DeclaringType, Size = x.Size + x.GcInfoSize + x.EhInfoSize }).Concat(typeStats).GroupBy(x => FindNamespace(x.Type)).Select(x => new { x.Key, Sum = x.Sum(x => x.Size) }).ToList(); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine("Size By Namespace"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in methodsByNamespace.OrderByDescending(x => x.Sum)) + { + var name = m.Key + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine("// ********** Size By Namespace"); + foreach (var m in methodsByNamespace.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"{m.Key,-70} {m.Sum,9:n0}"); + } + Console.WriteLine("// **********"); + } + + Console.WriteLine(); + + var blobs = globalType.Methods.First(x => x.Name == "Blobs"); + var blobStats = GetBlobs(blobs).ToList(); + var blobSize = blobStats.Sum(x => x.Size); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine($"Blobs Total Size {blobSize:n0}"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in blobStats.OrderByDescending(x => x.Size)) + { + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Size:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine($"// ********** Blobs Total Size {blobSize:n0}"); + foreach (var m in blobStats.OrderByDescending(x => x.Size)) + { + Console.WriteLine($"{m.Name,-70} {m.Size,9:n0}"); + } + Console.WriteLine("// **********"); + } + + if (markDownStyleOutput) + { + var methodsByClass = methodStats + .Where(x => x.Method.DeclaringType.Scope.Name == "Npgsql") + .GroupBy(x => GetClassName(x.Method)) + .OrderByDescending(x => x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize)) + .Take(100) + .ToList(); + + static string GetClassName(MethodReference methodReference) + { + var type = methodReference.DeclaringType.DeclaringType ?? methodReference.DeclaringType; + return type.Namespace + "." + type.Name; + } + + Console.WriteLine("
"); + Console.WriteLine("Top 100 Npgsql Classes By Methods Size"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size | Total Instantiations |"); + Console.WriteLine("| --- | --- | --- |"); + foreach (var m in methodsByClass + .Select(x => new { Name = x.Key, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize), Count = x.Count() }) + .OrderByDescending(x => x.Sum)) + { + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} | {m.Count} |"); + } + + Console.WriteLine(); + Console.WriteLine("
"); + + foreach (var g in methodsByClass + .OrderByDescending(x => x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize))) + { + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine($"\"{g.Key}\" Methods ({g.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize):n0} bytes)"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size | Instantiations |"); + Console.WriteLine("| --- | --- | --- |"); + foreach (var m in g + .GroupBy(x => GetMethodName(x.Method)) + .Select(x => new { Name = x.Key, Size = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize), Count = x.Count()}) + .OrderByDescending(x => x.Size)) + { + var methodName = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {methodName} | {m.Size:n0} | {m.Count} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("
"); + + static string GetMethodName(MethodReference methodReference) + { + if (methodReference.DeclaringType.DeclaringType is null) + { + return methodReference.Name; + } + + return methodReference.DeclaringType.Name; + } + } + + Console.WriteLine(); + Console.WriteLine("
"); + + var filteredTypeStats = GetTypes(versionMajor, types) + .Where(x => x.Type.Scope.Name == "Npgsql") + .GroupBy(x => x.Type.Name) + .OrderByDescending(x => x.Sum(x => x.Size)) + .Take(100) + .ToList(); + Console.WriteLine("
"); + Console.WriteLine($"Top 100 Npgsql Types By Size"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size | Instantiations |"); + Console.WriteLine("| --- | --- | --- |"); + foreach (var m in filteredTypeStats) + { + var name = m.Key + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum(x => x.Size):n0} | {m.Count()} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + + Console.WriteLine(); + } + } + + public static IEnumerable GetTypes(int formatVersion, MethodDefinition types) + { + var entrySize = formatVersion == 1 ? 2 : 3; + + types.Body.SimplifyMacros(); + var il = types.Body.Instructions; + for (var i = 0; i + entrySize < il.Count; i += entrySize) + { + var type = (TypeReference)il[i + 0].Operand; + var size = (int)il[i + 1].Operand; + yield return new TypeStats + { + Type = type, + Size = size + }; + } + } + + public static IEnumerable GetMethods(int formatVersion, MethodDefinition methods) + { + var entrySize = formatVersion == 1 ? 4 : 5; + + methods.Body.SimplifyMacros(); + var il = methods.Body.Instructions; + for (var i = 0; i + entrySize < il.Count; i += entrySize) + { + var method = (MethodReference)il[i + 0].Operand; + var size = (int)il[i + 1].Operand; + var gcInfoSize = (int)il[i + 2].Operand; + var ehInfoSize = (int)il[i + 3].Operand; + yield return new MethodStats + { + Method = method, + Size = size, + GcInfoSize = gcInfoSize, + EhInfoSize = ehInfoSize + }; + } + } + + public static IEnumerable GetBlobs(MethodDefinition blobs) + { + blobs.Body.SimplifyMacros(); + var il = blobs.Body.Instructions; + for (var i = 0; i + 2 < il.Count; i += 2) + { + var name = (string)il[i + 0].Operand; + var size = (int)il[i + 1].Operand; + yield return new BlobStats + { + Name = name, + Size = size + }; + } + } + } + + public class TypeStats + { + public string MethodName { get; set; } + public TypeReference Type { get; set; } + public int Size { get; set; } + } + + public class MethodStats + { + public MethodReference Method { get; set; } + public int Size { get; set; } + public int GcInfoSize { get; set; } + public int EhInfoSize { get; set; } + } + + public class BlobStats + { + public string Name { get; set; } + public int Size { get; set; } + } +} diff --git a/test/Npgsql.Benchmarks/App.config b/test/Npgsql.Benchmarks/App.config index 88fa4027bd..614f539efc 100644 --- a/test/Npgsql.Benchmarks/App.config +++ 
b/test/Npgsql.Benchmarks/App.config @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs b/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs index 4704cc90e3..913fd59ebe 100644 --- a/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs +++ b/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs @@ -1,4 +1,4 @@ -using System; +using System; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs b/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs index c75febe708..77e9eccec4 100644 --- a/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs +++ b/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics.CodeAnalysis; using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; @@ -55,8 +55,6 @@ public object ExecuteReader() class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/Commit.cs b/test/Npgsql.Benchmarks/Commit.cs index 96e04ade96..cae0b300a6 100644 --- a/test/Npgsql.Benchmarks/Commit.cs +++ b/test/Npgsql.Benchmarks/Commit.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; @@ -29,8 +29,6 @@ public void Basic() class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs b/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs index e63bbba7c6..f47d4ae4fe 100644 --- a/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs +++ b/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using 
BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using Microsoft.Data.SqlClient; @@ -22,8 +22,6 @@ public class ConnectionCreationBenchmarks class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs b/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs index d733ff9c11..9f39915cfa 100644 --- a/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs +++ b/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using Microsoft.Data.SqlClient; @@ -168,8 +168,6 @@ public void NonPooled() class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/CopyExport.cs b/test/Npgsql.Benchmarks/CopyExport.cs index e4ea9c0698..79f30c42c7 100644 --- a/test/Npgsql.Benchmarks/CopyExport.cs +++ b/test/Npgsql.Benchmarks/CopyExport.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using NpgsqlTypes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/CopyImport.cs b/test/Npgsql.Benchmarks/CopyImport.cs index 486d257d6c..cfabaec2c1 100644 --- a/test/Npgsql.Benchmarks/CopyImport.cs +++ b/test/Npgsql.Benchmarks/CopyImport.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using NpgsqlTypes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/GetFieldValue.cs b/test/Npgsql.Benchmarks/GetFieldValue.cs index 0065f4546c..be6b7bb1b8 100644 --- a/test/Npgsql.Benchmarks/GetFieldValue.cs +++ b/test/Npgsql.Benchmarks/GetFieldValue.cs @@ -1,4 +1,4 @@ -using 
BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; diff --git a/test/Npgsql.Benchmarks/Insert.cs b/test/Npgsql.Benchmarks/Insert.cs index 2de57776d5..2caa8c6e4c 100644 --- a/test/Npgsql.Benchmarks/Insert.cs +++ b/test/Npgsql.Benchmarks/Insert.cs @@ -1,4 +1,4 @@ -using System.Text; +using System.Text; using BenchmarkDotNet.Attributes; using NpgsqlTypes; diff --git a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj index bc51b25561..c42fd783ea 100644 --- a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj +++ b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj @@ -1,9 +1,12 @@ - + portable Npgsql.Benchmarks Exe + + + NU1901;NU1902;NU1903;NU1904 diff --git a/test/Npgsql.Benchmarks/Prepare.cs b/test/Npgsql.Benchmarks/Prepare.cs index 246b25e491..cee771869e 100644 --- a/test/Npgsql.Benchmarks/Prepare.cs +++ b/test/Npgsql.Benchmarks/Prepare.cs @@ -1,4 +1,3 @@ -using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Reflection; using System.Text; @@ -55,9 +54,7 @@ public void GlobalSetup() [GlobalCleanup] public void GlobalCleanup() - { - _conn.Dispose(); - } + => _conn.Dispose(); public Prepare() { @@ -120,4 +117,4 @@ static string GenerateQuery(int tablesToJoin) .Values .Cast() .ToArray(); -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/Program.cs b/test/Npgsql.Benchmarks/Program.cs index 9a334f63b8..bd737133a0 100644 --- a/test/Npgsql.Benchmarks/Program.cs +++ b/test/Npgsql.Benchmarks/Program.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Running; +using BenchmarkDotNet.Running; using System.Reflection; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/ReadArray.cs b/test/Npgsql.Benchmarks/ReadArray.cs index fecda03f43..a69002a6a8 100644 --- a/test/Npgsql.Benchmarks/ReadArray.cs +++ b/test/Npgsql.Benchmarks/ReadArray.cs @@ -1,10 +1,4 @@ -using BenchmarkDotNet.Attributes; -using System; -using 
System.Collections.Generic; -using System.Diagnostics; -using System.IO; -using System.Linq; -using System.Runtime.CompilerServices; +using BenchmarkDotNet.Attributes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/ReadColumns.cs b/test/Npgsql.Benchmarks/ReadColumns.cs index aa10d25f1a..0eb7f0cf33 100644 --- a/test/Npgsql.Benchmarks/ReadColumns.cs +++ b/test/Npgsql.Benchmarks/ReadColumns.cs @@ -1,4 +1,4 @@ -using System.Linq; +using System.Linq; using System.Reflection; using System.Text; using BenchmarkDotNet.Attributes; diff --git a/test/Npgsql.Benchmarks/ReadRows.cs b/test/Npgsql.Benchmarks/ReadRows.cs index 7ec8d9ed09..04256249e0 100644 --- a/test/Npgsql.Benchmarks/ReadRows.cs +++ b/test/Npgsql.Benchmarks/ReadRows.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/ResolveHandler.cs b/test/Npgsql.Benchmarks/ResolveHandler.cs index 1b8bf6a8af..b36d9b51e4 100644 --- a/test/Npgsql.Benchmarks/ResolveHandler.cs +++ b/test/Npgsql.Benchmarks/ResolveHandler.cs @@ -1,15 +1,13 @@ using BenchmarkDotNet.Attributes; -using Npgsql.Internal.TypeHandling; -using Npgsql.TypeMapping; -using NpgsqlTypes; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; namespace Npgsql.Benchmarks; [MemoryDiagnoser] public class ResolveHandler { - NpgsqlDataSource? 
_dataSource; - TypeMapper _typeMapper = null!; + PgSerializerOptions _serializerOptions = null!; [Params(0, 1, 2)] public int NumPlugins { get; set; } @@ -22,30 +20,21 @@ public void Setup() dataSourceBuilder.UseNodaTime(); if (NumPlugins > 1) dataSourceBuilder.UseNetTopologySuite(); - _dataSource = dataSourceBuilder.Build(); - _typeMapper = _dataSource.TypeMapper; - } - - [GlobalCleanup] - public void Cleanup() => _dataSource?.Dispose(); - - [Benchmark] - public NpgsqlTypeHandler ResolveOID() - => _typeMapper.ResolveByOID(23); // int4 - [Benchmark] - public NpgsqlTypeHandler ResolveNpgsqlDbType() - => _typeMapper.ResolveByNpgsqlDbType(NpgsqlDbType.Integer); + // Alternatively we must build a data source and get it bootstrapped against a real database. + (_, var config) = dataSourceBuilder.PrepareConfiguration(); + _serializerOptions = new PgSerializerOptions(PostgresMinimalDatabaseInfo.DefaultTypeCatalog, config.ResolverChain); + } [Benchmark] - public NpgsqlTypeHandler ResolveDataTypeName() - => _typeMapper.ResolveByDataTypeName("integer"); + public PgTypeInfo? ResolveDefault() + => _serializerOptions.GetTypeInfoInternal(null, new Oid(23)); // int4 [Benchmark] - public NpgsqlTypeHandler ResolveClrTypeNonGeneric() - => _typeMapper.ResolveByValue((object)8); + public PgTypeInfo? ResolveType() + => _serializerOptions.GetTypeInfoInternal(typeof(int), null); [Benchmark] - public NpgsqlTypeHandler ResolveClrTypeGeneric() - => _typeMapper.ResolveByValue(8); + public PgTypeInfo? 
ResolveBoth() + => _serializerOptions.GetTypeInfoInternal(typeof(int), new Oid(23)); // int4 } diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs b/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs index 496b51af6f..9ac8a6fa96 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs @@ -1,9 +1,4 @@ -using System.Collections.Generic; -using BenchmarkDotNet.Attributes; -using Npgsql.NameTranslation; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using Npgsql.Util; + /* Disabling for now: unmapped composite support is probably going away, and there's a good chance this * class can be simplified to a certain extent diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs index 19e044b0a4..2ac09063c6 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs @@ -1,46 +1,29 @@ -using System.Collections.Generic; +using System.Collections.Generic; using BenchmarkDotNet.Attributes; -using Npgsql.Internal.TypeHandlers.NumericHandlers; +using Npgsql.Internal.Converters; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Int16 : TypeHandlerBenchmarks -{ - public Int16() : base(new Int16Handler(GetPostgresType("smallint"))) { } -} +public class Int16() : TypeHandlerBenchmarks(new Int2Converter()); [Config(typeof(Config))] -public class Int32 : TypeHandlerBenchmarks -{ - public Int32() : base(new Int32Handler(GetPostgresType("integer"))) { } -} +public class Int32() : TypeHandlerBenchmarks(new Int4Converter()); [Config(typeof(Config))] -public class Int64 : TypeHandlerBenchmarks -{ - public Int64() : base(new Int64Handler(GetPostgresType("bigint"))) { } -} +public class Int64() : TypeHandlerBenchmarks(new Int8Converter()); [Config(typeof(Config))] -public class Single : TypeHandlerBenchmarks -{ - public Single() : base(new SingleHandler(GetPostgresType("real"))) { } 
-} +public class Single() : TypeHandlerBenchmarks(new RealConverter()); [Config(typeof(Config))] -public class Double : TypeHandlerBenchmarks -{ - public Double() : base(new DoubleHandler(GetPostgresType("double precision"))) { } -} +public class Double() : TypeHandlerBenchmarks(new DoubleConverter()); [Config(typeof(Config))] -public class Numeric : TypeHandlerBenchmarks +public class Numeric() : TypeHandlerBenchmarks(new DecimalNumericConverter()) { - public Numeric() : base(new NumericHandler(GetPostgresType("numeric"))) { } - - protected override IEnumerable ValuesOverride() => new[] - { + protected override IEnumerable ValuesOverride() => + [ 0.0000000000000000000000000001M, 0.000000000000000000000001M, 0.00000000000000000001M, @@ -55,12 +38,9 @@ protected override IEnumerable ValuesOverride() => new[] 10000000000000000M, 100000000000000000000M, 1000000000000000000000000M, - 10000000000000000000000000000M, - }; + 10000000000000000000000000000M + ]; } [Config(typeof(Config))] -public class Money : TypeHandlerBenchmarks -{ - public Money() : base(new MoneyHandler(GetPostgresType("money"))) { } -} \ No newline at end of file +public class Money() : TypeHandlerBenchmarks(new MoneyConverter()); diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 407a749240..34f3226326 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -1,18 +1,17 @@ -using BenchmarkDotNet.Attributes; +using Npgsql.Internal; +using Npgsql.Internal.Converters; using System.Collections.Generic; using System.Text; -using Npgsql.Internal.TypeHandlers; +using BenchmarkDotNet.Attributes; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Text : TypeHandlerBenchmarks +public class Text() : TypeHandlerBenchmarks(TextConverter.CreateStringConverter(PgSerializerOptions.DefaultUtf8Encoding)) { - public Text() : base(new TextHandler(GetPostgresType("text"), 
Encoding.UTF8)) { } - protected override IEnumerable ValuesOverride() { - for (var i = 1; i <= 10000; i *= 10) + for (var i = Encoding.UTF8.GetByteCount("x"); i <= NpgsqlWriteBuffer.DefaultSize; i *= 4) yield return new string('x', i); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index 76cc862378..5dcf0f53cd 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -1,17 +1,13 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using BenchmarkDotNet.Diagnosers; using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.IO; -using System.Text; +using System.Threading; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Util; - -#nullable disable namespace Npgsql.Benchmarks.TypeHandlers; @@ -40,65 +36,93 @@ public override void SetLength(long value) { } public override void Write(byte[] buffer, int offset, int count) { } } - readonly EndlessStream _stream; - readonly NpgsqlTypeHandler _handler; - readonly NpgsqlReadBuffer _readBuffer; + readonly PgConverter _converter; + readonly PgReader _reader; + readonly PgWriter _writer; readonly NpgsqlWriteBuffer _writeBuffer; - T _value; - int _elementSize; + readonly NpgsqlReadBuffer _readBuffer; + readonly BufferRequirements _binaryRequirements; - protected TypeHandlerBenchmarks(NpgsqlTypeHandler handler) - { - _stream = new EndlessStream(); - _handler = handler ?? 
throw new ArgumentNullException(nameof(handler)); - _readBuffer = new NpgsqlReadBuffer(null, _stream, null, NpgsqlReadBuffer.MinimumSize, Encoding.UTF8, PGUtil.RelaxedUTF8Encoding); - _writeBuffer = new NpgsqlWriteBuffer(null, _stream, null, NpgsqlWriteBuffer.MinimumSize, Encoding.UTF8); - } + T? _value; + PgValueBinding _valueBinding; + PgFieldBinding _fieldBinding; - protected static PostgresType GetPostgresType(string pgType) + protected TypeHandlerBenchmarks(PgConverter handler) { - using (var conn = BenchmarkEnvironment.OpenConnection()) - using (var cmd = new NpgsqlCommand($"SELECT NULL::{pgType}", conn)) - using (var reader = cmd.ExecuteReader()) - return reader.GetPostgresType(0); + var stream = new EndlessStream(); + _converter = (PgConverter)handler ?? throw new ArgumentNullException(nameof(handler)); + _readBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); + _writeBuffer = new NpgsqlWriteBuffer(null, stream, null, NpgsqlWriteBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding) { MessageLengthValidation = false }; + _reader = new PgReader(_readBuffer); + _writer = _writeBuffer.GetWriter(new PostgresMinimalDatabaseInfo(), FlushMode.Blocking); + _converter.CanConvert(DataFormat.Binary, out _binaryRequirements); } - public IEnumerable Values() => ValuesOverride(); + public IEnumerable Values() => ValuesOverride(); - protected virtual IEnumerable ValuesOverride() => new[] { default(T) }; + protected virtual IEnumerable ValuesOverride() => [default]; - [ParamsSource(nameof(Values))] + [ParamsSource(nameof(Values)), MaybeNull] public T Value { get => _value; set { - NpgsqlLengthCache cache = null; + // Workaround for https://github.com/dotnet/BenchmarkDotNet/issues/3049 + if (default(T) is null && value is null) + return; - _value = value; - _elementSize = _handler.ValidateAndGetLength(value, ref cache, null); - - cache.Rewind(); - - 
_handler.WriteWithLength(_value, _writeBuffer, cache, null, false); - Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _elementSize); + if (_reader.Initialized) + { + // Prevent Commit from calling Skip, which would cause us to try and use the null connector. + _readBuffer.ReadPosition += _reader.CurrentRemaining; + _reader.Commit(); + } - _readBuffer.FilledBytes = _elementSize; - _writeBuffer.WritePosition = 0; + _value = value; + object? writeState = null; + var size = _converter.IsDbNullOrGetSize(DataFormat.Binary, _binaryRequirements.Write, value, ref writeState); + _valueBinding = new PgValueBinding(DataFormat.Binary, _binaryRequirements.Write, size, writeState); + + if (!_valueBinding.IsDbNullBinding) + { + _writer.StartWrite(async: false, _valueBinding, CancellationToken.None).GetAwaiter().GetResult(); + _converter.Write(_writer, value!); + _writer.EndWrite(_valueBinding.Size.Value); + + Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _writeBuffer.WritePosition); + _readBuffer.AddBytesToRead(_writeBuffer.WritePosition); + _readBuffer.ReadPosition = 0; + _writeBuffer.WritePosition = 0; + + _reader.Init(_valueBinding.DataFormat, _valueBinding.Size.Value.GetValueOrDefault()); + _fieldBinding = new PgFieldBinding(DataFormat.Binary, _binaryRequirements.Read); + } } } [Benchmark] public T Read() { - _readBuffer.ReadPosition = sizeof(int); - return _handler.Read(_readBuffer, _elementSize); + if (_valueBinding.IsDbNullBinding) + return default!; + + _readBuffer.ReadPosition = 0; + _reader.StartRead(_fieldBinding); + var value = _converter.Read(_reader); + _reader.EndRead(); + return value; } [Benchmark] public void Write() { - _writeBuffer.WritePosition = 0; - _handler.WriteWithLength(_value, _writeBuffer, null, null, false); + if (_valueBinding.IsDbNullBinding) + return; + + _writer.RefreshBuffer(); + _writer.StartWrite(async: false, _valueBinding, CancellationToken.None).GetAwaiter().GetResult(); + _converter.Write(_writer, Value!); 
+ _writer.EndWrite(_valueBinding.Size.GetValueOrDefault()); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs index 78d4018dfd..099acae43c 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs @@ -1,11 +1,8 @@ -using System; +using System; using BenchmarkDotNet.Attributes; -using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.Converters; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Uuid : TypeHandlerBenchmarks -{ - public Uuid() : base(new UuidHandler(GetPostgresType("uuid"))) { } -} \ No newline at end of file +public class Uuid() : TypeHandlerBenchmarks(new GuidUuidConverter()); diff --git a/test/Npgsql.Benchmarks/UnixDomainSocket.cs b/test/Npgsql.Benchmarks/UnixDomainSocket.cs index 89c42a9a49..71748c9ba0 100644 --- a/test/Npgsql.Benchmarks/UnixDomainSocket.cs +++ b/test/Npgsql.Benchmarks/UnixDomainSocket.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Linq; using BenchmarkDotNet.Attributes; diff --git a/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs b/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs index 429861f262..fee0e47f9c 100644 --- a/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs +++ b/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs @@ -1,4 +1,4 @@ -using System.Linq; +using System.Linq; using BenchmarkDotNet.Attributes; using NpgsqlTypes; diff --git a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs index 4deefc6a5a..ad7728835f 100644 --- a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs +++ b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs @@ -1,4 +1,5 @@ -using System.Data; +using System; +using System.Data; using System.Linq; using System.Threading.Tasks; using 
Microsoft.Extensions.DependencyInjection; @@ -9,13 +10,15 @@ namespace Npgsql.DependencyInjection.Tests; -public class DependencyInjectionTests +[TestFixture(DataSourceMode.Standard)] +[TestFixture(DataSourceMode.Slim)] +public class DependencyInjectionTests(DataSourceMode mode) { [Test] public async Task NpgsqlDataSource_is_registered_properly([Values] bool async) { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); await using var serviceProvider = serviceCollection.BuildServiceProvider(); var dataSource = serviceProvider.GetRequiredService(); @@ -29,7 +32,7 @@ public async Task NpgsqlDataSource_is_registered_properly([Values] bool async) public async Task NpgsqlMultiHostDataSource_is_registered_properly([Values] bool async) { var serviceCollection = new ServiceCollection(); - serviceCollection.AddMultiHostNpgsqlDataSource(TestUtil.ConnectionString); + RegisterMultiHostDataSource(serviceCollection, TestUtil.ConnectionString); await using var serviceProvider = serviceCollection.BuildServiceProvider(); var multiHostDataSource = serviceProvider.GetRequiredService(); @@ -42,11 +45,47 @@ public async Task NpgsqlMultiHostDataSource_is_registered_properly([Values] bool : dataSource.OpenConnection(); } + [Test] + public async Task NpgsqlDataSource_with_service_key_is_registered_properly([Values] bool async) + { + const string serviceKey = "key"; + var serviceCollection = new ServiceCollection(); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString, serviceKey); + + await using var serviceProvider = serviceCollection.BuildServiceProvider(); + var dataSource = serviceProvider.GetRequiredKeyedService(serviceKey); + Assert.Throws(() => serviceProvider.GetRequiredService()); + + await using var connection = async + ? 
await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + } + + [Test] + public async Task NpgsqlMultiHostDataSource_with_service_key_is_registered_properly([Values] bool async) + { + const string serviceKey = "key"; + var serviceCollection = new ServiceCollection(); + RegisterMultiHostDataSource(serviceCollection, TestUtil.ConnectionString, serviceKey); + + await using var serviceProvider = serviceCollection.BuildServiceProvider(); + var multiHostDataSource = serviceProvider.GetRequiredKeyedService(serviceKey); + var dataSource = serviceProvider.GetRequiredKeyedService(serviceKey); + Assert.Throws(() => serviceProvider.GetRequiredService()); + Assert.Throws(() => serviceProvider.GetRequiredService()); + + Assert.That(dataSource, Is.SameAs(multiHostDataSource)); + + await using var connection = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + } + [Test] public void NpgsqlDataSource_is_registered_as_singleton_by_default() { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); using var serviceProvider = serviceCollection.BuildServiceProvider(); using var scope1 = serviceProvider.CreateScope(); @@ -64,7 +103,7 @@ public void NpgsqlDataSource_is_registered_as_singleton_by_default() public async Task NpgsqlConnection_is_registered_properly([Values] bool async) { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); using var serviceProvider = serviceCollection.BuildServiceProvider(); using var scope = serviceProvider.CreateScope(); @@ -84,7 +123,7 @@ public async Task NpgsqlConnection_is_registered_properly([Values] bool async) public void NpgsqlConnection_is_registered_as_transient_by_default() { var serviceCollection = new ServiceCollection(); - 
serviceCollection.AddNpgsqlDataSource("Host=localhost;Username=test;Password=test"); + RegisterDataSource(serviceCollection, "Host=localhost;Username=test;Password=test"); using var serviceProvider = serviceCollection.BuildServiceProvider(); using var scope1 = serviceProvider.CreateScope(); @@ -109,7 +148,7 @@ public async Task LoggerFactory_is_picked_up_from_ServiceCollection() var serviceCollection = new ServiceCollection(); serviceCollection.AddLogging(b => b.AddProvider(listLoggerProvider)); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); await using var serviceProvider = serviceCollection.BuildServiceProvider(); var dataSource = serviceProvider.GetRequiredService(); @@ -120,4 +159,26 @@ public async Task LoggerFactory_is_picked_up_from_ServiceCollection() Assert.That(listLoggerProvider.Log.Any(l => l.Id == NpgsqlEventId.CommandExecutionCompleted)); } + + IServiceCollection RegisterDataSource(ServiceCollection serviceCollection, string connectionString, object? serviceKey = null) + => mode switch + { + DataSourceMode.Standard => serviceCollection.AddNpgsqlDataSource(connectionString, serviceKey: serviceKey), + DataSourceMode.Slim => serviceCollection.AddNpgsqlSlimDataSource(connectionString, serviceKey: serviceKey), + _ => throw new NotSupportedException($"Mode {mode} not supported") + }; + + IServiceCollection RegisterMultiHostDataSource(ServiceCollection serviceCollection, string connectionString, object? 
serviceKey = null) + => mode switch + { + DataSourceMode.Standard => serviceCollection.AddMultiHostNpgsqlDataSource(connectionString, serviceKey: serviceKey), + DataSourceMode.Slim => serviceCollection.AddMultiHostNpgsqlSlimDataSource(connectionString, serviceKey: serviceKey), + _ => throw new NotSupportedException($"Mode {mode} not supported") + }; +} + +public enum DataSourceMode +{ + Standard, + Slim } diff --git a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj index f577d83d58..2f1f442547 100644 --- a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj +++ b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj @@ -1,14 +1,13 @@ - - - net7.0 - - + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj new file mode 100644 index 0000000000..7f9ce607ca --- /dev/null +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -0,0 +1,27 @@ + + + exe + true + true + true + true + true + true + false + true + false + + + + + + + + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + diff --git a/test/Npgsql.NativeAotTests/Program.cs b/test/Npgsql.NativeAotTests/Program.cs new file mode 100644 index 0000000000..098c978296 --- /dev/null +++ b/test/Npgsql.NativeAotTests/Program.cs @@ -0,0 +1,19 @@ +using System; +using Npgsql; + +var connectionString = Environment.GetEnvironmentVariable("NPGSQL_TEST_DB") + ?? 
"Server=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0"; + +var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); +await using var dataSource = dataSourceBuilder.Build(); + +await using var conn = dataSource.CreateConnection(); +await conn.OpenAsync(); +await using var cmd = new NpgsqlCommand("SELECT 'Hello World'", conn); +await using var reader = await cmd.ExecuteReaderAsync(); +if (!await reader.ReadAsync()) + throw new Exception("Got nothing from the database"); + +var value = reader.GetFieldValue(0); +if (value != "Hello World") + throw new Exception($"Got {value} instead of the expected 'Hello World'"); diff --git a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs deleted file mode 100644 index 89ee298192..0000000000 --- a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Data; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.Tests; -using NpgsqlTypes; -using NUnit.Framework; - -namespace Npgsql.NodaTime.Tests; - -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing -[NonParallelizable] -public class LegacyNodaTimeTests : TestBase -{ - [Test] - public Task Timestamp_as_Instant() - => AssertType( - new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), - "1998-04-12 13:26:38.789", - "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime); - - [Test] - public Task Timestamp_as_LocalDateTime() - => AssertType( - new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), - "1998-04-12 13:26:38.789", - "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime, - isDefaultForReading: false); - - [Test] - public Task Timestamptz_as_Instant() - => AssertType( - new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), - "1998-04-12 15:26:38.789+02", - "timestamp with time zone", - 
NpgsqlDbType.TimestampTz, - DbType.DateTimeOffset, - isDefault: false); - - [Test] - public Task Timestamptz_ZonedDateTime_infinite_values_are_not_supported() - => AssertTypeUnsupported(Instant.MaxValue.InZone(DateTimeZone.Utc), "infinity", "timestamptz"); - - [Test] - public Task Timestamptz_OffsetDateTime_infinite_values_are_not_supported() - => AssertTypeUnsupported(Instant.MaxValue.WithOffset(Offset.Zero), "infinity", "timestamptz"); - - #region Support - - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) - { - var conn = new NpgsqlConnection(connectionString ?? ConnectionString); - await conn.OpenAsync(); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; - } - - protected override NpgsqlConnection OpenConnection(string? connectionString = null) - => throw new NotSupportedException(); - -#pragma warning disable CS1998 // Release code blocks below lack await -#pragma warning disable CS0618 // GlobalTypeMapper is obsolete - [OneTimeSetUp] - public async Task Setup() - { -#if DEBUG - Internal.NodaTimeUtils.LegacyTimestampBehavior = true; - Util.Statics.LegacyTimestampBehavior = true; - - // Clear any previous cached mappings/handlers in case tests were executed before the legacy flag was set. 
- NpgsqlConnection.GlobalTypeMapper.Reset(); - NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); - await using var connection = await OpenConnectionAsync(); - await connection.ReloadTypesAsync(); -#else - Assert.Ignore( - "Legacy NodaTime tests rely on the Npgsql.EnableLegacyTimestampBehavior AppContext switch and can only be run in DEBUG builds"); -#endif - - } - - [OneTimeTearDown] - public async Task Teardown() - { -#if DEBUG - Internal.NodaTimeUtils.LegacyTimestampBehavior = false; - Util.Statics.LegacyTimestampBehavior = false; - - // Clear any previous cached mappings/handlers to not affect test which will run later without the legacy flag - NpgsqlConnection.GlobalTypeMapper.Reset(); - NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); - - await using var connection = await OpenConnectionAsync(); - await connection.ReloadTypesAsync(); -#endif - } -#pragma warning restore CS1998 -#pragma warning restore CS0618 // GlobalTypeMapper is obsolete - - #endregion Support -} diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs b/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs deleted file mode 100644 index 25ab4f58cd..0000000000 --- a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs +++ /dev/null @@ -1,18 +0,0 @@ -using NUnit.Framework; - -namespace Npgsql.NodaTime.Tests; - -// Note that we register NodaTime globally, rather than using the more standard data source mapping. -// We can do this since NUnit runs each test assembly in a different process, so we get isolation and don't interfere with other, -// non-NodaTime tests. This also allows us to test global type inference, which only works with global mappings. 
-[SetUpFixture] -public class NodaTimeSetupFixture -{ -#pragma warning disable CS0618 // GlobalTypeMapper is obsolete - [OneTimeSetUp] - public void OneTimeSetUp() => NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); - - [OneTimeTearDown] - public void OneTimeTearDown() => NpgsqlConnection.GlobalTypeMapper.Reset(); -#pragma warning restore CS0618 // GlobalTypeMapper is obsolete -} diff --git a/test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj b/test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj deleted file mode 100644 index bfa9b74079..0000000000 --- a/test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/test/Npgsql.PluginTests/GeoJSONTests.cs b/test/Npgsql.PluginTests/GeoJSONTests.cs index 2f44d0ec18..9e51a5b298 100644 --- a/test/Npgsql.PluginTests/GeoJSONTests.cs +++ b/test/Npgsql.PluginTests/GeoJSONTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Concurrent; using System.Linq; using System.Threading.Tasks; @@ -8,6 +8,7 @@ using GeoJSON.Net.Geometry; using Newtonsoft.Json; using Npgsql.Tests; +using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -22,89 +23,89 @@ public struct TestData } public static readonly TestData[] Tests = - { + [ new() { Geometry = new Point( new Position(longitude: 1d, latitude: 2d)) - { BoundingBoxes = new[] { 1d, 2d, 1d, 2d } }, + { BoundingBoxes = [1d, 2d, 1d, 2d] }, CommandText = "st_makepoint(1,2)" }, new() { - Geometry = new LineString(new[] { + Geometry = new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 1d, latitude: 2d) - }) - { BoundingBoxes = new[] { 1d, 1d, 1d, 2d } }, + ]) + { BoundingBoxes = [1d, 1d, 1d, 2d] }, CommandText = "st_makeline(st_makepoint(1,1), st_makepoint(1,2))" }, new() { - Geometry = new Polygon(new[] { - new LineString(new[] { + Geometry = new Polygon([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new 
Position(longitude: 2d, latitude: 2d), new Position(longitude: 3d, latitude: 3d), new Position(longitude: 1d, latitude: 1d) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 3d, 3d } }, + ]) + ]) + { BoundingBoxes = [1d, 1d, 3d, 3d] }, CommandText = "st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1), st_makepoint(2,2), st_makepoint(3,3), st_makepoint(1,1)]))" }, new() { - Geometry = new MultiPoint(new[] { + Geometry = new MultiPoint([ new Point(new Position(longitude: 1d, latitude: 1d)) - }) - { BoundingBoxes = new[] { 1d, 1d, 1d, 1d } }, + ]) + { BoundingBoxes = [1d, 1d, 1d, 1d] }, CommandText = "st_multi(st_makepoint(1, 1))" }, new() { - Geometry = new MultiLineString(new[] { - new LineString(new[] { + Geometry = new MultiLineString([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 1d, latitude: 2d) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 1d, 2d } }, + ]) + ]) + { BoundingBoxes = [1d, 1d, 1d, 2d] }, CommandText = "st_multi(st_makeline(st_makepoint(1,1), st_makepoint(1,2)))" }, new() { - Geometry = new MultiPolygon(new[] { - new Polygon(new[] { - new LineString(new[] { + Geometry = new MultiPolygon([ + new Polygon([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 2d, latitude: 2d), new Position(longitude: 3d, latitude: 3d), new Position(longitude: 1d, latitude: 1d) - }) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 3d, 3d } }, + ]) + ]) + ]) + { BoundingBoxes = [1d, 1d, 3d, 3d] }, CommandText = "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1), st_makepoint(2,2), st_makepoint(3,3), st_makepoint(1,1)])))" }, new() { - Geometry = new GeometryCollection(new IGeometryObject[] { + Geometry = new GeometryCollection([ new Point(new Position(longitude: 1d, latitude: 1d)), - new MultiPolygon(new[] { - new Polygon(new[] { - new LineString(new[] { + new MultiPolygon([ + new Polygon([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 2d, latitude: 2d), 
new Position(longitude: 3d, latitude: 3d), new Position(longitude: 1d, latitude: 1d) - }) - }) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 3d, 3d } }, + ]) + ]) + ]) + ]) + { BoundingBoxes = [1d, 1d, 3d, 3d] }, CommandText = "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1), st_makepoint(2,2), st_makepoint(3,3), st_makepoint(1,1)]))))" - }, - }; + } + ]; [Test, TestCaseSource(nameof(Tests))] public async Task Read(TestData data) @@ -137,24 +138,24 @@ public async Task IgnoreM() } public static readonly TestData[] NotAllZSpecifiedTests = - { + [ new() { - Geometry = new LineString(new[] { + Geometry = new LineString([ new Position(1d, 1d, 0d), new Position(2d, 2d) - }) + ]) }, new() { - Geometry = new LineString(new[] { + Geometry = new LineString([ new Position(1d, 1d, 0d), new Position(2d, 2d), new Position(3d, 3d), new Position(4d, 4d) - }) + ]) } - }; + ]; [Test, TestCaseSource(nameof(NotAllZSpecifiedTests))] public async Task Not_all_Z_specified(TestData data) @@ -285,6 +286,128 @@ public async Task Roundtrip_geometry_geography() } } + [Test, TestCaseSource(nameof(Tests))] + public async Task Import_geometry(TestData data) + { + await using var conn = await OpenConnectionAsync(options: GeoJSONOptions.BoundingBox); + var table = await CreateTempTable(conn, "field geometry"); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync(data.Geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = $"SELECT field FROM {table}"; + await using var reader = await cmd.ExecuteReaderAsync(); + Assert.That(await reader.ReadAsync()); + var actual = reader.GetValue(0); + Assert.That(actual, Is.EqualTo(data.Geometry)); + } + + [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/4827")] + public async Task Import_big_geometry() + { + await using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "id text, field geometry"); + + var geometry = new MultiLineString([ + new LineString( + Enumerable.Range(1, 507) + .Select(i => new Position(longitude: i, latitude: i)) + .Append(new Position(longitude: 1d, latitude: 1d))), + new LineString([ + new Position(longitude: 1d, latitude: 1d), + new Position(longitude: 1d, latitude: 2d), + new Position(longitude: 1d, latitude: 3d), + new Position(longitude: 1d, latitude: 1d) + ]) + ]); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (id, field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync("a", NpgsqlDbType.Text); + await writer.WriteAsync(geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = $"SELECT field FROM {table}"; + await using var reader = await cmd.ExecuteReaderAsync(); + Assert.That(await reader.ReadAsync()); + var actual = reader.GetValue(0); + Assert.That(actual, Is.EqualTo(geometry)); + } + + [Test, TestCaseSource(nameof(Tests))] + public async Task Export_geometry(TestData data) + { + await using var conn = await OpenConnectionAsync(options: GeoJSONOptions.BoundingBox); + var table = await CreateTempTable(conn, "field geometry"); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync(data.Geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using (var reader = await conn.BeginBinaryExportAsync($"COPY {table} (field) TO STDOUT BINARY")) + { + await reader.StartRowAsync(); + var field = await 
reader.ReadAsync(NpgsqlDbType.Geometry); + Assert.That(field, Is.EqualTo(data.Geometry)); + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4830")] + public async Task Export_big_geometry() + { + await using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "id text, field geometry"); + + var geometry = new Polygon([ + new LineString( + Enumerable.Range(1, 507) + .Select(i => new Position(longitude: i, latitude: i)) + .Append(new Position(longitude: 1d, latitude: 1d))), + new LineString([ + new Position(longitude: 1d, latitude: 1d), + new Position(longitude: 1d, latitude: 2d), + new Position(longitude: 1d, latitude: 3d), + new Position(longitude: 1d, latitude: 1d) + ]) + ]); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (id, field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync("aaaa", NpgsqlDbType.Text); + await writer.WriteAsync(geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using (var reader = await conn.BeginBinaryExportAsync($"COPY {table} (id, field) TO STDOUT BINARY")) + { + await reader.StartRowAsync(); + var id = await reader.ReadAsync(); + var field = await reader.ReadAsync(NpgsqlDbType.Geometry); + Assert.That(id, Is.EqualTo("aaaa")); + Assert.That(field, Is.EqualTo(geometry)); + } + } + ValueTask OpenConnectionAsync(GeoJSONOptions options = GeoJSONOptions.None) => GetDataSource(options).OpenConnectionAsync(); diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index 49790a2172..9bc8198e79 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -1,10 +1,9 @@ -using Newtonsoft.Json; +using Newtonsoft.Json; using Newtonsoft.Json.Linq; using Npgsql.Tests; -using NpgsqlTypes; using NUnit.Framework; using System; -using System.Text; +using System.Data; using 
System.Threading.Tasks; // ReSharper disable AccessToModifiedClosure @@ -15,10 +14,9 @@ namespace Npgsql.PluginTests; /// /// Tests for the Npgsql.Json.NET mapping plugin /// -[NonParallelizable] -[TestFixture(NpgsqlDbType.Jsonb)] -[TestFixture(NpgsqlDbType.Json)] -public class JsonNetTests : TestBase +[TestFixture("jsonb")] +[TestFixture("json")] +public class JsonNetTests(string dataTypeName) : TestBase { [Test] public Task Roundtrip_object() @@ -26,10 +24,8 @@ public Task Roundtrip_object() JsonDataSource, new Foo { Bar = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_string() @@ -37,10 +33,8 @@ public Task Roundtrip_string() JsonDataSource, @"{""p"": 1}", @"{""p"": 1}", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_char_array() @@ -48,21 +42,17 @@ public Task Roundtrip_char_array() JsonDataSource, @"{""p"": 1}".ToCharArray(), @"{""p"": 1}", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_byte_array() => AssertType( JsonDataSource, - Encoding.ASCII.GetBytes(@"{""p"": 1}"), + @"{""p"": 1}"u8.ToArray(), @"{""p"": 1}", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: 
DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary), valueTypeEqualsFieldType: false); [Test] public Task Roundtrip_JObject() @@ -70,10 +60,8 @@ public Task Roundtrip_JObject() JsonDataSource, new JObject { ["Bar"] = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] public Task Roundtrip_JArray() @@ -81,16 +69,14 @@ public Task Roundtrip_JArray() JsonDataSource, new JArray(new[] { 1, 2, 3 }), IsJsonb ? "[1, 2, 3]" : "[1,2,3]", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] public async Task Deserialize_failure() { await using var conn = await JsonDataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($@"SELECT '[1, 2, 3]'::{_pgTypeName}", conn); + await using var cmd = new NpgsqlCommand($@"SELECT '[1, 2, 3]'::{dataTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); // Attempt to deserialize JSON array into object @@ -105,19 +91,17 @@ public async Task Clr_type_mapping() { var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(Foo) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(Foo)]); else - dataSourceBuilder.UseJsonNet(jsonClrTypes: new[] { typeof(Foo) }); + dataSourceBuilder.UseJsonNet(jsonClrTypes: [typeof(Foo)]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( dataSource, new Foo { Bar = 8 }, IsJsonb ? 
@"{""Bar"": 8}" : @"{""Bar"":8}", - _pgTypeName, - _npgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } [Test] @@ -125,19 +109,17 @@ public async Task Roundtrip_clr_array() { var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(int[]) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(int[])]); else - dataSourceBuilder.UseJsonNet(jsonClrTypes: new[] { typeof(int[]) }); + dataSourceBuilder.UseJsonNet(jsonClrTypes: [typeof(int[])]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( dataSource, new[] { 1, 2, 3 }, IsJsonb ? "[1, 2, 3]" : "[1,2,3]", - _pgTypeName, - _npgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, skipArrayCheck: true); // there is no value only mapping for int[][] } class DateWrapper @@ -154,33 +136,32 @@ public async Task Custom_serializer_settings() var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(DateWrapper) }, settings: settings); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(DateWrapper)], settings: settings); else - dataSourceBuilder.UseJsonNet(jsonClrTypes: new[] { typeof(DateWrapper) }, settings: settings); + dataSourceBuilder.UseJsonNet(jsonClrTypes: [typeof(DateWrapper)], settings: settings); await using var dataSource = dataSourceBuilder.Build(); await AssertType( dataSource, new DateWrapper { Date = new DateTime(2018, 04, 20) }, IsJsonb ? 
"{\"Date\": \"The 20th of April, 2018\"}" : "{\"Date\":\"The 20th of April, 2018\"}", - _pgTypeName, - _npgsqlDbType, - isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeName, + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } + [Test] public async Task Bug3464() { var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseJsonNet(new[] { typeof(Bug3464Class) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(Bug3464Class)]); await using var dataSource = dataSourceBuilder.Build(); var expected = new Bug3464Class { SomeString = new string('5', 8174) }; await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT @p1, @p2", conn); - cmd.Parameters.AddWithValue("p1", expected).NpgsqlDbType = _npgsqlDbType; - cmd.Parameters.AddWithValue("p2", expected).NpgsqlDbType = _npgsqlDbType; + cmd.Parameters.AddWithValue("p1", expected).DataTypeName = dataTypeName; + cmd.Parameters.AddWithValue("p2", expected).DataTypeName = dataTypeName; await using var reader = cmd.ExecuteReader(); } @@ -190,8 +171,65 @@ public class Bug3464Class public string? 
SomeString { get; set; } } - readonly NpgsqlDbType _npgsqlDbType; - readonly string _pgTypeName; + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5475")] + public async Task Read_jarray_from_get_value() + { + await using var conn = await JsonDataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var json = new JArray(new JObject { { "name", "value1" } }); + + cmd.CommandText = $"SELECT @p"; + cmd.Parameters.Add(new("p", json)); + await cmd.ExecuteScalarAsync(); + } + [Test] + public async Task Write_jobject_without_npgsqldbtype() + { + await using var conn = await JsonDataSource.OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject = new JObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", jsonObject)); + await cmd.ExecuteNonQueryAsync(); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jobject_array_without_npgsqldbtype() + { + await using var conn = await JsonDataSource.OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject1 = new JObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + var jsonObject2 = new JObject + { + { "name", "value2" }, + { "amount", 2 }, + { "unit", "g" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); + await cmd.ExecuteNonQueryAsync(); + } class Foo { @@ -212,13 +250,7 @@ public void SetUp() public async Task Teardown() => await JsonDataSource.DisposeAsync(); - public 
JsonNetTests(NpgsqlDbType npgsqlDbType) - { - _npgsqlDbType = npgsqlDbType; - _pgTypeName = npgsqlDbType.ToString().ToLower(); - } - - bool IsJsonb => _npgsqlDbType == NpgsqlDbType.Jsonb; + bool IsJsonb => dataTypeName == "jsonb"; NpgsqlDataSource JsonDataSource = default!; } diff --git a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs new file mode 100644 index 0000000000..ff177b38a4 --- /dev/null +++ b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs @@ -0,0 +1,93 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using NodaTime; +using Npgsql.NodaTime.Internal; +using Npgsql.Tests; +using NUnit.Framework; + +namespace Npgsql.PluginTests; + +[NonParallelizable] // Since this test suite manipulates an AppContext switch +public class LegacyNodaTimeTests : TestBase, IDisposable +{ + const string TimeZone = "Europe/Berlin"; + + [Test] + public async Task Timestamp_as_ZonedDateTime() + => await AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InZoneLeniently(DateTimeZoneProviders.Tzdb[TimeZone]), + "1998-04-12 13:26:38.789+02", + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTimeOffset, DbType.Object), valueTypeEqualsFieldType: false); + + [Test] + public Task Timestamp_as_Instant() + => AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), + "1998-04-12 13:26:38.789", + "timestamp without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object)); + + [Test] + public Task Timestamp_as_LocalDateTime() + => AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), + "1998-04-12 13:26:38.789", + "timestamp without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object), valueTypeEqualsFieldType: false); + + [Test] + public Task Timestamptz_as_Instant() + => AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 
789).InUtc().ToInstant(), + "1998-04-12 15:26:38.789+02", + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTimeOffset, DbType.Object)); + + [Test] + public async Task Timestamptz_ZonedDateTime_infinite_values_are_not_supported() + { + await AssertTypeUnsupportedRead("infinity", "timestamptz"); + await AssertTypeUnsupportedWrite(Instant.MaxValue.WithOffset(Offset.Zero), "timestamptz"); + } + + [Test] + public async Task Timestamptz_OffsetDateTime_infinite_values_are_not_supported() + { + await AssertTypeUnsupportedRead("infinity", "timestamptz"); + await AssertTypeUnsupportedWrite(Instant.MaxValue.WithOffset(Offset.Zero), "timestamptz"); + } + + #region Support + + protected override NpgsqlDataSource DataSource { get; } + + public LegacyNodaTimeTests() + { +#if DEBUG + NodaTimeUtils.LegacyTimestampBehavior = true; + Util.Statics.LegacyTimestampBehavior = true; + + var builder = CreateDataSourceBuilder(); + builder.UseNodaTime(); + builder.ConnectionStringBuilder.Timezone = TimeZone; + DataSource = builder.Build(); +#else + Assert.Ignore( + "Legacy NodaTime tests rely on the Npgsql.EnableLegacyTimestampBehavior AppContext switch and can only be run in DEBUG builds"); +#endif + } + + public void Dispose() + { +#if DEBUG + NodaTimeUtils.LegacyTimestampBehavior = false; + Util.Statics.LegacyTimestampBehavior = false; + + DataSource.Dispose(); +#endif + } + + #endregion Support +} diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 20fc9f17a4..7e9d8caa84 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections; +using System; using System.Collections.Concurrent; using System.Linq; using System.Threading.Tasks; @@ -14,144 +13,129 @@ namespace Npgsql.PluginTests; public class NetTopologySuiteTests : TestBase { - public struct TestData - { - 
public Ordinates Ordinates; - public Geometry Geometry; - public string CommandText; - } + static readonly TestCaseData[] TestCases = + [ + new TestCaseData(Ordinates.None, new Point(1d, 2500d), "st_makepoint(1,2500)") + .SetName("Point"), - public static IEnumerable TestCases { - get - { - // Two dimensional data - yield return new TestCaseData(Ordinates.None, new Point(1d, 2500d), "st_makepoint(1,2500)"); + new TestCaseData(Ordinates.None, new MultiPoint([new Point(new Coordinate(1d, 1d))]), "st_multi(st_makepoint(1, 1))") + .SetName("MultiPoint"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, - new LineString(new[] { new Coordinate(1d, 1d), new Coordinate(1d, 2500d) }), - "st_makeline(st_makepoint(1,1),st_makepoint(1,2500))" - ); + new LineString([new Coordinate(1d, 1d), new Coordinate(1d, 2500d)]), + "st_makeline(st_makepoint(1,1),st_makepoint(1,2500))") + .SetName("LineString"), + + new TestCaseData( + Ordinates.None, + new MultiLineString([ + new LineString([ + new Coordinate(1d, 1d), + new Coordinate(1d, 2500d) + ]) + ]), + "st_multi(st_makeline(st_makepoint(1,1),st_makepoint(1,2500)))") + .SetName("MultiLineString"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ), - "st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))" - ); - - yield return new TestCaseData( - Ordinates.None, - new MultiPoint(new[] { new Point(new Coordinate(1d, 1d)) }), - "st_multi(st_makepoint(1, 1))" - ); - - yield return new TestCaseData( - Ordinates.None, - new MultiLineString(new[] - { - new LineString(new[] - { - new Coordinate(1d, 1d), - new Coordinate(1d, 2500d) - }) - }), - "st_multi(st_makeline(st_makepoint(1,1),st_makepoint(1,2500)))" - ); + 
"st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))") + .SetName("Polygon"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }), - "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))" - ); + ]), + "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))") + .SetName("MultiPolygon"), - yield return new TestCaseData( - Ordinates.None, - GeometryCollection.Empty, - "st_geomfromtext('GEOMETRYCOLLECTION EMPTY')" - ); + new TestCaseData(Ordinates.None, GeometryCollection.Empty, "st_geomfromtext('GEOMETRYCOLLECTION EMPTY')") + .SetName("EmptyCollection"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, - new GeometryCollection(new Geometry[] - { + new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }) - }), - "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))))" - ); + ]) + ]), + "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))))") + .SetName("Collection"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, - new GeometryCollection(new Geometry[] - { + new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new GeometryCollection(new Geometry[] - { + new GeometryCollection([ new Point(new 
Coordinate(1d, 1d)), - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }) - }) - }), - "st_collect(st_makepoint(1,1),st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))))" - ); + ]) + ]) + ]), + "st_collect(st_makepoint(1,1),st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))))") + .SetName("CollectionNested"), - yield return new TestCaseData(Ordinates.XYZ, new Point(1d, 2d, 3d), "st_makepoint(1,2,3)"); + new TestCaseData(Ordinates.XYZ, new Point(1d, 2d, 3d), "st_makepoint(1,2,3)") + .SetName("PointXYZ"), - yield return new TestCaseData( + new TestCaseData( Ordinates.XYZM, new Point( - new DotSpatialAffineCoordinateSequence(new[] { 1d, 2d }, new[] { 3d }, new[] { 4d }), + new DotSpatialAffineCoordinateSequence([1d, 2d], [3d], [4d]), GeometryFactory.Default), - "st_makepoint(1,2,3,4)" - ); - } - } + "st_makepoint(1,2,3,4)") + .SetName("PointXYZM"), + + new TestCaseData( + Ordinates.None, + new LinearRing([ + new Coordinate(1d, 1d), + new Coordinate(2d, 2d), + new Coordinate(3d, 3d), + new Coordinate(1d, 1d) + ]), + "st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])") + .SetName("LinearRing") + ]; [Test, TestCaseSource(nameof(TestCases))] public async Task Read(Ordinates ordinates, Geometry geometry, string sqlRepresentation) { - using var conn = await OpenConnectionAsync(); - using var cmd = conn.CreateCommand(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); cmd.CommandText = $"SELECT {sqlRepresentation}"; Assert.That(Equals(cmd.ExecuteScalar(), geometry)); } @@ -159,33 +143,111 @@ public async Task Read(Ordinates 
ordinates, Geometry geometry, string sqlReprese [Test, TestCaseSource(nameof(TestCases))] public async Task Write(Ordinates ordinates, Geometry geometry, string sqlRepresentation) { - using var conn = await OpenConnectionAsync(handleOrdinates: ordinates); - using var cmd = conn.CreateCommand(); + await using var conn = await OpenConnectionAsync(handleOrdinates: ordinates); + await using var cmd = conn.CreateCommand(); cmd.Parameters.AddWithValue("p1", geometry); cmd.CommandText = $"SELECT st_asewkb(@p1) = st_asewkb({sqlRepresentation})"; Assert.That(cmd.ExecuteScalar(), Is.True); } + [Test] + public async Task ReadWithHandleOrdinatesXY_FiltersZCoordinate() + { + // This test verifies that handleOrdinates IS respected during read operations + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XY); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT ST_MakePoint(1, 2, 3)"; // Create a 3D point in SQL + + var result = (Point)cmd.ExecuteScalar()!; + + // The Z coordinate should be filtered out during reading based on handleOrdinates: XY + Assert.That(result.CoordinateSequence.HasZ, Is.False, + "Z coordinate was correctly filtered during read"); + Assert.That(result.X, Is.EqualTo(1d)); + Assert.That(result.Y, Is.EqualTo(2d)); + Assert.That(result.Z, Is.NaN, "Z coordinate should be NaN when filtered out"); + } + + [Test] + public async Task WriteWithHandleOrdinatesXY_ShouldFilterZCoordinate() + { + // This test verifies that when handleOrdinates is set to XY, + // Z coordinates are correctly filtered out during write operations. 
+ var pointWithZ = new Point(1d, 2d, 3d); + + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XY); + await using var cmd = conn.CreateCommand(); + cmd.Parameters.AddWithValue("p1", pointWithZ); + cmd.CommandText = "SELECT ST_Z(@p1::geometry)"; + + var result = cmd.ExecuteScalar(); + + // Z coordinate should be filtered out and return NULL + Assert.That(result, Is.EqualTo(DBNull.Value), + "Z coordinate should be filtered during write when handleOrdinates: Ordinates.XY"); + } + + [Test] + public async Task WriteWithHandleOrdinatesXY_ShouldFilterMCoordinate() + { + // This test verifies that when handleOrdinates is set to XY, + // M coordinates are correctly filtered out during write operations. + var pointWithM = new Point( + new DotSpatialAffineCoordinateSequence([1d, 2d], [double.NaN], [4d]), + GeometryFactory.Default); + + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XY); + await using var cmd = conn.CreateCommand(); + cmd.Parameters.AddWithValue("p1", pointWithM); + cmd.CommandText = "SELECT ST_M(@p1::geometry)"; + + var result = cmd.ExecuteScalar(); + + // M coordinate should be filtered out and return NULL + Assert.That(result, Is.EqualTo(DBNull.Value), + "M coordinate should be filtered during write when handleOrdinates: Ordinates.XY"); + } + + [Test] + public async Task WriteWithHandleOrdinatesXYZ_ShouldFilterMCoordinate() + { + // This test verifies that when handleOrdinates is set to XYZ, + // M coordinates are correctly filtered out during write operations. 
+ var pointWithZM = new Point( + new DotSpatialAffineCoordinateSequence([1d, 2d], [3d], [4d]), + GeometryFactory.Default); + + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XYZ); + await using var cmd = conn.CreateCommand(); + cmd.Parameters.AddWithValue("p1", pointWithZM); + cmd.CommandText = "SELECT ST_M(@p1::geometry)"; + + var result = cmd.ExecuteScalar(); + + // M coordinate should be filtered out and return NULL + Assert.That(result, Is.EqualTo(DBNull.Value), + "M coordinate should be filtered during write when handleOrdinates: Ordinates.XYZ"); + } + [Test] public async Task Array() { var point = new Point(new Coordinate(1d, 1d)); await AssertType( - NtsDataSource, + DataSource, new Geometry[] { point }, '{' + GetSqlLiteral(point) + '}', "geometry[]", - NpgsqlDbType.Geometry | NpgsqlDbType.Array, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] public async Task Read_as_concrete_type() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT st_makepoint(1,1)", conn); - using var reader = cmd.ExecuteReader(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT st_makepoint(1,1)", conn); + await using var reader = cmd.ExecuteReader(); reader.Read(); Assert.That(reader.GetFieldValue(0), Is.EqualTo(new Point(new Coordinate(1d, 1d)))); Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); @@ -195,16 +257,16 @@ public async Task Read_as_concrete_type() public async Task Roundtrip_geometry_geography() { var point = new Point(new Coordinate(1d, 1d)); - using var conn = await OpenConnectionAsync(); - conn.ExecuteNonQuery("CREATE TEMP TABLE data (geom GEOMETRY, geog GEOGRAPHY)"); - using (var cmd = new NpgsqlCommand("INSERT INTO data (geom, geog) VALUES (@p, @p)", conn)) + await using var conn = await OpenConnectionAsync(); + await conn.ExecuteNonQueryAsync("CREATE TEMP TABLE data 
(geom GEOMETRY, geog GEOGRAPHY)"); + await using (var cmd = new NpgsqlCommand("INSERT INTO data (geom, geog) VALUES (@p, @p)", conn)) { cmd.Parameters.AddWithValue("@p", point); cmd.ExecuteNonQuery(); } - using (var cmd = new NpgsqlCommand("SELECT geom, geog FROM data", conn)) - using (var reader = cmd.ExecuteReader()) + await using (var cmd = new NpgsqlCommand("SELECT geom, geog FROM data", conn)) + await using (var reader = cmd.ExecuteReader()) { reader.Read(); Assert.That(reader[0], Is.EqualTo(point)); @@ -215,7 +277,7 @@ public async Task Roundtrip_geometry_geography() [Test, Explicit] public async Task Concurrency_test() { - await using var adminConnection = OpenConnection(); + await using var adminConnection = await OpenConnectionAsync(); var table = await CreateTempTable( adminConnection, "point GEOMETRY, linestring GEOMETRY, polygon GEOMETRY, " + @@ -224,53 +286,45 @@ public async Task Concurrency_test() await adminConnection.ExecuteNonQueryAsync($"INSERT INTO {table} DEFAULT VALUES"); var point = new Point(new Coordinate(1d, 1d)); - var lineString = new LineString(new[] { new Coordinate(1d, 1d), new Coordinate(1d, 2500d) }); + var lineString = new LineString([new Coordinate(1d, 1d), new Coordinate(1d, 2500d)]); var polygon = new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ); - var multiPoint = new MultiPoint(new[] { new Point(new Coordinate(1d, 1d)) }); - var multiLineString = new MultiLineString(new[] - { - new LineString(new[] - { + var multiPoint = new MultiPoint([new Point(new Coordinate(1d, 1d))]); + var multiLineString = new MultiLineString([ + new LineString([ new Coordinate(1d, 1d), new Coordinate(1d, 2500d) - }) - }); - var multiPolygon = new MultiPolygon(new[] - { + ]) + ]); + var multiPolygon = new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new 
Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }); - var collection = new GeometryCollection(new Geometry[] - { + ]); + var collection = new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }) - }); + ]) + ]); await Task.WhenAll(Enumerable.Range(0, 30).Select(i => Task.Run(async () => { @@ -324,7 +378,7 @@ protected ValueTask OpenConnectionAsync(string? connectionStri }); if (handleOrdinates == Ordinates.XY) - NtsDataSource = dataSource; + _xyDataSource ??= dataSource; return dataSource.OpenConnectionAsync(); } @@ -343,6 +397,8 @@ public async Task SetUp() public async Task Teardown() => await Task.WhenAll(NtsDataSources.Values.Select(async ds => await ds.DisposeAsync())); - NpgsqlDataSource NtsDataSource = default!; + protected override NpgsqlDataSource DataSource => _xyDataSource ?? throw new InvalidOperationException(); + NpgsqlDataSource? 
_xyDataSource; + ConcurrentDictionary NtsDataSources = new(); } diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs similarity index 58% rename from test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs rename to test/Npgsql.PluginTests/NodaTimeInfinityTests.cs index caa623e249..52068898d2 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs @@ -1,38 +1,52 @@ using System; +using System.Data; using System.Threading.Tasks; using NodaTime; using Npgsql.Tests; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; -namespace Npgsql.NodaTime.Tests; +namespace Npgsql.PluginTests; [TestFixture(false)] #if DEBUG [TestFixture(true)] +[NonParallelizable] // Since this test suite manipulates an AppContext switch #endif -[NonParallelizable] -public class NodaTimeInfinityTests : TestBase +public class NodaTimeInfinityTests : TestBase, IDisposable { [Test] // #4715 public async Task DateRange_with_upper_bound_infinity() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await AssertType( new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue), "[-infinity,infinity]", "daterange", - NpgsqlDbType.DateRange); + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, + """{"[-infinity,infinity]"}""", + "daterange[]", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, + """{[-infinity,infinity]}""", + "datemultirange", 
dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); } [Test] public async Task Timestamptz_read_values() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await using var conn = await OpenConnectionAsync(); @@ -50,7 +64,7 @@ public async Task Timestamptz_read_values() [Test] public async Task Timestamptz_write_values() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await using var conn = await OpenConnectionAsync(); @@ -83,7 +97,7 @@ public async Task Timestamptz_write() Parameters = { new() { Value = Instant.MinValue, NpgsqlDbType = NpgsqlDbType.TimestampTz } } }; - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { // NodaTime Instant.MinValue is outside the PG timestamp range. Assert.That(async () => await cmd.ExecuteScalarAsync(), @@ -100,7 +114,7 @@ public async Task Timestamptz_write() Parameters = { new() { Value = Instant.MaxValue, NpgsqlDbType = NpgsqlDbType.TimestampTz } } }; - Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31 23:59:59.999999" : "infinity")); + Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(Statics.DisableDateTimeInfinityConversions ? 
"9999-12-31 23:59:59.999999" : "infinity")); } [Test] @@ -113,7 +127,7 @@ public async Task Timestamptz_read() await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { Assert.That(() => reader[0], Throws.Exception.TypeOf()); Assert.That(() => reader[1], Throws.Exception.TypeOf()); @@ -130,14 +144,12 @@ public async Task Timestamp_write() { await using var conn = await OpenConnectionAsync(); - // TODO: Switch to use LocalDateTime.MinMaxValue when available (#4061) - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn) { - Parameters = { new() { Value = LocalDate.MinIsoValue + LocalTime.MinValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } + Parameters = { new() { Value = LocalDateTime.MinIsoValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } }; - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { // NodaTime LocalDateTime.MinValue is outside the PG timestamp range. Assert.That(async () => await cmd.ExecuteScalarAsync(), @@ -151,10 +163,10 @@ public async Task Timestamp_write() await using var cmd2 = new NpgsqlCommand("SELECT $1::text", conn) { - Parameters = { new() { Value = LocalDate.MaxIsoValue + LocalTime.MaxValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } + Parameters = { new() { Value = LocalDateTime.MaxIsoValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } }; - Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions + Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(Statics.DisableDateTimeInfinityConversions ? 
"9999-12-31 23:59:59.999999" : "infinity")); } @@ -169,16 +181,15 @@ public async Task Timestamp_read() await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { Assert.That(() => reader[0], Throws.Exception.TypeOf()); Assert.That(() => reader[1], Throws.Exception.TypeOf()); } else { - // TODO: Switch to use LocalDateTime.MinMaxValue when available (#4061) - Assert.That(reader[0], Is.EqualTo(LocalDate.MinIsoValue + LocalTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(LocalDate.MaxIsoValue + LocalTime.MaxValue)); + Assert.That(reader[0], Is.EqualTo(LocalDateTime.MinIsoValue)); + Assert.That(reader[1], Is.EqualTo(LocalDateTime.MaxIsoValue)); } } @@ -193,7 +204,7 @@ public async Task Date_write() }; // LocalDate.MinIsoValue is outside of the PostgreSQL date range - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf() .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.DatetimeFieldOverflow)); @@ -202,7 +213,7 @@ public async Task Date_write() cmd.Parameters[0].Value = LocalDate.MaxIsoValue; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31" : "infinity")); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(Statics.DisableDateTimeInfinityConversions ? 
"9999-12-31" : "infinity")); } [Test] @@ -215,7 +226,7 @@ public async Task Date_read() await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { Assert.That(() => reader[0], Throws.Exception.TypeOf()); Assert.That(() => reader[1], Throws.Exception.TypeOf()); @@ -230,7 +241,7 @@ public async Task Date_read() [Test, Description("Makes sure that when ConvertInfinityDateTime is true, infinity values are properly converted")] public async Task DateConvertInfinity() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await using var conn = await OpenConnectionAsync(); @@ -266,20 +277,111 @@ public async Task DateConvertInfinity() } } - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + [Test] + public async Task Interval_write() { - var conn = await base.OpenConnectionAsync(connectionString); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "17.0", "Infinity values for intervals were introduced in PostgreSQL 17"); + await using var cmd = new NpgsqlCommand("SELECT $1::text", conn) + { + Parameters = { new() { Value = Period.MinValue, NpgsqlDbType = NpgsqlDbType.Interval } } + }; + + // While Period.MinValue technically isn't outside of supported values by postgres, we can't reasonably convert it + if (Statics.DisableDateTimeInfinityConversions) + { + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); + Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + await conn.OpenAsync(); + } + else + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("-infinity")); + + cmd.Parameters[0].Value = Period.MaxValue; + + // While Period.MaxValue technically isn't outside of supported values by postgres, we can't 
reasonably convert it + if (Statics.DisableDateTimeInfinityConversions) + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); + else + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("infinity")); } - protected override NpgsqlConnection OpenConnection(string? connectionString = null) - => throw new NotSupportedException(); + [Test] + public async Task Interval_read() + { + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "17.0", "Infinity values for intervals were introduced in PostgreSQL 17"); + + await using var cmd = new NpgsqlCommand("SELECT '-infinity'::interval, 'infinity'::interval", conn); + + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + if (Statics.DisableDateTimeInfinityConversions) + { + Assert.That(() => reader[0], Throws.Exception.TypeOf()); + Assert.That(() => reader[1], Throws.Exception.TypeOf()); + } + else + { + Assert.That(reader[0], Is.EqualTo(Period.MinValue)); + Assert.That(reader[1], Is.EqualTo(Period.MaxValue)); + } + } + + [Test, Description("Makes sure that when ConvertInfinityDateTime is true, infinity values are properly converted")] + public async Task Interval_convert_infinity() + { + if (Statics.DisableDateTimeInfinityConversions) + return; + + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "17.0", "Infinity values for intervals were introduced in PostgreSQL 17"); + await conn.ExecuteNonQueryAsync("CREATE TEMP TABLE data (i1 INTERVAL, i2 INTERVAL)"); + + using (var cmd = new NpgsqlCommand("INSERT INTO data VALUES (@p1, @p2)", conn)) + { + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Interval, Period.MaxValue); + cmd.Parameters.AddWithValue("p2", NpgsqlDbType.Interval, Period.MinValue); + await cmd.ExecuteNonQueryAsync(); + } + + using (var cmd = new NpgsqlCommand("SELECT i1::TEXT, i2::TEXT, i1, i2 FROM data", conn)) + using (var reader = await cmd.ExecuteReaderAsync()) + { + 
await reader.ReadAsync(); + Assert.That(reader.GetValue(0), Is.EqualTo("infinity")); + Assert.That(reader.GetValue(1), Is.EqualTo("-infinity")); + Assert.That(reader.GetFieldValue(2), Is.EqualTo(Period.MaxValue)); + Assert.That(reader.GetFieldValue(3), Is.EqualTo(Period.MinValue)); + } + } + + [Test] + public async Task Inclusive_End_Range_Infinity_read() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( + "SELECT tstzrange('-infinity', 'infinity','[]') as val", conn); + + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + if (Statics.DisableDateTimeInfinityConversions) + { + Assert.That(() => reader[0], Throws.Exception.TypeOf()); + } + else + { + Assert.That(reader[0], Is.EqualTo(new Interval(Instant.MinValue, null))); + } + } + + protected override NpgsqlDataSource DataSource { get; } public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) { #if DEBUG - DisableDateTimeInfinityConversions = disableDateTimeInfinityConversions; Statics.DisableDateTimeInfinityConversions = disableDateTimeInfinityConversions; #else if (disableDateTimeInfinityConversions) @@ -288,13 +390,19 @@ public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) "NodaTimeInfinityTests rely on the Npgsql.DisableDateTimeInfinityConversions AppContext switch and can only be run in DEBUG builds"); } #endif + + var builder = CreateDataSourceBuilder(); + builder.UseNodaTime(); + builder.ConnectionStringBuilder.Options = "-c TimeZone=Europe/Berlin"; + DataSource = builder.Build(); } public void Dispose() { #if DEBUG - DisableDateTimeInfinityConversions = false; Statics.DisableDateTimeInfinityConversions = false; #endif + + DataSource.Dispose(); } } diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs similarity index 53% rename from test/Npgsql.NodaTime.Tests/NodaTimeTests.cs rename to test/Npgsql.PluginTests/NodaTimeTests.cs index 
48ddfd265a..b678374aed 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -1,7 +1,8 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using NodaTime; +using Npgsql.NodaTime.Properties; using Npgsql.Tests; using NpgsqlTypes; using NUnit.Framework; @@ -10,26 +11,27 @@ // ReSharper disable AccessToModifiedClosure // ReSharper disable AccessToDisposedClosure -namespace Npgsql.NodaTime.Tests; +namespace Npgsql.PluginTests; -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing -public class NodaTimeTests : TestBase +public class NodaTimeTests : TestBase, IDisposable { #region Timestamp without time zone static readonly TestCaseData[] TimestampValues = - { + [ new TestCaseData(new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), "1998-04-12 13:26:38.789") .SetName("Timestamp_pre2000"), new TestCaseData(new LocalDateTime(2015, 1, 27, 8, 45, 12, 345), "2015-01-27 08:45:12.345") .SetName("Timestamp_post2000"), new TestCaseData(new LocalDateTime(1999, 12, 31, 23, 59, 59, 999).PlusNanoseconds(456000), "1999-12-31 23:59:59.999456") .SetName("Timestamp_with_microseconds") - }; + ]; [Test, TestCaseSource(nameof(TimestampValues))] public Task Timestamp_as_LocalDateTime(LocalDateTime localDateTime, string sqlLiteral) - => AssertType(localDateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2); + => AssertType(localDateTime, sqlLiteral, + "timestamp without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime2, DbType.Object)); [Test] public Task Timestamp_as_unspecified_DateTime() @@ -37,19 +39,15 @@ public Task Timestamp_as_unspecified_DateTime() new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "1998-04-12 13:26:38", "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime2, - isDefaultForReading: false); + dbType: DbType.DateTime2, valueTypeEqualsFieldType: 
false); [Test] public Task Timestamp_as_long() => AssertType( -54297202000000, "1998-04-12 13:26:38", - "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime2, - isDefault: false); + "timestamp without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime2, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public Task Timestamp_cannot_use_as_Instant() @@ -81,17 +79,39 @@ public Task Timestamp_cannot_use_as_DateTimeOffset() [Test] public Task Timestamp_cannot_write_utc_DateTime() - => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); + => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); [Test] - public Task Tsrange_as_NpgsqlRange_of_LocalDateTime() - => AssertType( + public async Task Tsrange_as_NpgsqlRange_of_LocalDateTime() + { + await AssertType( new NpgsqlRange( new(1998, 4, 12, 13, 26, 38), new(1998, 4, 12, 15, 26, 38)), - @"[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""]", + """["1998-04-12 13:26:38","1998-04-12 15:26:38"]""", "tsrange", - NpgsqlDbType.TimestampRange); + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] { new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38), + new(1998, 4, 12, 15, 26, 38)), }, + """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]"}""", + "tsrange[]", + dataTypeInference: DataTypeInference.Nothing, + skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38), + new(1998, 4, 12, 15, 26, 38)), }, + """{["1998-04-12 13:26:38","1998-04-12 15:26:38"]}""", + "tsmultirange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); + } [Test] 
public async Task Tsmultirange_as_array_of_NpgsqlRange_of_LocalDateTime() @@ -109,9 +129,9 @@ await AssertType( new(1998, 4, 13, 13, 26, 38), new(1998, 4, 13, 15, 26, 38)), }, - @"{[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""],[""1998-04-13 13:26:38"",""1998-04-13 15:26:38""]}", + """{["1998-04-12 13:26:38","1998-04-12 15:26:38"],["1998-04-13 13:26:38","1998-04-13 15:26:38"]}""", "tsmultirange", - NpgsqlDbType.TimestampMultirange); + dataTypeInference: DataTypeInference.Nothing); } #endregion Timestamp without time zone @@ -119,7 +139,7 @@ await AssertType( #region Timestamp with time zone static readonly TestCaseData[] TimestamptzValues = - { + [ new TestCaseData(new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), "1998-04-12 15:26:38+02") .SetName("Timestamptz_pre2000"), new TestCaseData(new LocalDateTime(2015, 1, 27, 8, 45, 12, 345).InUtc().ToInstant(), "2015-01-27 09:45:12.345+01") @@ -128,61 +148,53 @@ await AssertType( .SetName("Timestamptz_write_date_only"), new TestCaseData(new LocalDateTime(1999, 12, 31, 23, 59, 59, 999).PlusNanoseconds(456000).InUtc().ToInstant(), "2000-01-01 00:59:59.999456+01") .SetName("Timestamptz_with_microseconds") - }; + ]; [Test, TestCaseSource(nameof(TimestamptzValues))] public Task Timestamptz_as_Instant(Instant instant, string sqlLiteral) - => AssertType(instant, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime); + => AssertType(instant, sqlLiteral, + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object)); [Test] public Task Timestamptz_as_ZonedDateTime() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc(), "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object), valueTypeEqualsFieldType: false); [Test] 
public Task Timestamptz_as_OffsetDateTime() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38).WithOffset(Offset.Zero), "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object), valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_utc_DateTime() => AssertType( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.DateTime, valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_DateTimeOffset() => AssertType( new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.DateTime, valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_long() => AssertType( -54297202000000, "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefault: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_cannot_use_as_LocalDateTime() @@ -190,56 +202,72 @@ public Task Timestamptz_cannot_use_as_LocalDateTime() [Test] public async Task Timestamptz_cannot_write_non_utc_ZonedDateTime() - => await AssertTypeUnsupportedWrite( + => await AssertTypeUnsupportedWrite( new LocalDateTime().InUtc().ToInstant().InZone(DateTimeZoneProviders.Tzdb["Europe/Berlin"]), "timestamp with time zone"); [Test] public async Task 
Timestamptz_cannot_write_non_utc_OffsetDateTime() - => await AssertTypeUnsupportedWrite(new LocalDateTime().WithOffset(Offset.FromHours(2)), "timestamp with time zone"); + => await AssertTypeUnsupportedWrite(new LocalDateTime().WithOffset(Offset.FromHours(2)), "timestamp with time zone"); [Test] public async Task Timestamptz_cannot_write_non_utc_DateTime() { - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); } [Test] - public Task Tstzrange_as_Interval() - => AssertType( + public async Task Tstzrange_as_Interval() + { + await AssertType( new Interval( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02"")", + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")""", "tstzrange", - NpgsqlDbType.TimestampTzRange); + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] { new Interval( + new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), + new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")"}""", + "tstzrange[]", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new Interval( + new LocalDateTime(1998, 4, 12, 13, 
26, 38).InUtc().ToInstant(), + new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")}""", + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); + } [Test] public Task Tstzrange_with_no_end_as_Interval() => AssertType( - new Interval( - new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), null), - @"[""1998-04-12 15:26:38+02"",)", - "tstzrange", - NpgsqlDbType.TimestampTzRange); + new Interval(new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), null), + """["1998-04-12 15:26:38+02",)""", + "tstzrange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_as_Interval() => AssertType( - new Interval( null, - new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant()), - @"(,""1998-04-12 15:26:38+02"")", - "tstzrange", - NpgsqlDbType.TimestampTzRange); + new Interval(null, new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant()), + """(,"1998-04-12 15:26:38+02")""", + "tstzrange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_or_end_as_Interval() => AssertType( new Interval(null, null), - @"(,)", + """(,)""", "tstzrange", - NpgsqlDbType.TimestampTzRange); + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_Instant() @@ -247,10 +275,10 @@ public Task Tstzrange_as_NpgsqlRange_of_Instant() new NpgsqlRange( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", - "tstzrange", - NpgsqlDbType.TimestampTzRange, - isDefaultForReading: false); + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", + "tstzrange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: 
true); [Test] public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() @@ -258,10 +286,10 @@ public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() new NpgsqlRange( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc()), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", - "tstzrange", - NpgsqlDbType.TimestampTzRange, - isDefaultForReading: false); + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", + "tstzrange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() @@ -269,10 +297,10 @@ public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() new NpgsqlRange( new LocalDateTime(1998, 4, 12, 13, 26, 38).WithOffset(Offset.Zero), new LocalDateTime(1998, 4, 12, 15, 26, 38).WithOffset(Offset.Zero)), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", - "tstzrange", - NpgsqlDbType.TimestampTzRange, - isDefaultForReading: false); + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", + "tstzrange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] public async Task Tstzmultirange_as_array_of_Interval() @@ -290,9 +318,8 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""),[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02"")}", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange); + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"),["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02")}""", + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -311,10 +338,9 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, - 
@"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isDefaultForReading: false); + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -333,10 +359,9 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc()), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isDefaultForReading: false); + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -355,10 +380,9 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).WithOffset(Offset.Zero), new LocalDateTime(1998, 4, 13, 15, 26, 38).WithOffset(Offset.Zero)), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isDefaultForReading: false); + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -385,10 +409,8 @@ await AssertType( null, null) }, - @"{""[\""1998-04-12 15:26:38+02\"",\""1998-04-12 17:26:38+02\"")"",""[\""1998-04-13 15:26:38+02\"",\""1998-04-13 17:26:38+02\"")"",""[\""1998-04-13 15:26:38+02\"",)"",""(,\""1998-04-13 15:26:38+02\"")"",""(,)""}", - "tstzrange[]", - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, - isDefaultForWriting: 
false); + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",)","(,\"1998-04-13 15:26:38+02\")","(,)"}""", + "tstzrange[]", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -406,10 +428,9 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, - @"{""[\""1998-04-12 15:26:38+02\"",\""1998-04-12 17:26:38+02\""]"",""[\""1998-04-13 15:26:38+02\"",\""1998-04-13 17:26:38+02\""]""}", - "tstzrange[]", - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, - isDefault: false); + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", + "tstzrange[]", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } #endregion Timestamp with time zone @@ -418,32 +439,76 @@ await AssertType( [Test] public Task Date_as_LocalDate() - => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date); + => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", + "date", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.Date, DbType.Object)); [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2), valueTypeEqualsFieldType: false); [Test] public Task Date_as_int() - => AssertType(7579, "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(7579, "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.Int32), valueTypeEqualsFieldType: false); [Test] - public Task Daterange_as_DateInterval() - => AssertType( + public 
async Task Daterange_as_DateInterval() + { + await AssertType( new DateInterval(new(2002, 3, 4), new(2002, 3, 6)), "[2002-03-04,2002-03-07)", "daterange", - NpgsqlDbType.DateRange); + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // DateInterval[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, + """{"[2002-03-04,2002-03-07)"}""", + "daterange[]", dataTypeInference: DataTypeInference.Nothing, + skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, + """{[2002-03-04,2002-03-07)}""", + "datemultirange", dataTypeInference: DataTypeInference.Nothing, + skipArrayCheck: true); + } [Test] - public Task Daterange_as_NpgsqlRange_of_LocalDate() - => AssertType( + public async Task Daterange_as_NpgsqlRange_of_LocalDate() + { + await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", - "daterange", - NpgsqlDbType.DateRange, - isDefaultForReading: false); + "daterange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{"[2002-03-04,2002-03-06)"}""", + "daterange[]", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{[2002-03-04,2002-03-06)}""", + "datemultirange", dataTypeInference: DataTypeInference.Nothing, + 
valueTypeEqualsFieldType: false, + skipArrayCheck: true); + } [Test] public async Task Datemultirange_as_array_of_DateInterval() @@ -459,7 +524,7 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", - NpgsqlDbType.DateMultirange); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -475,25 +540,42 @@ await AssertType( new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - NpgsqlDbType.DateMultirange, - isDefaultForReading: false); + "datemultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } -#if NET6_0_OR_GREATER [Test] public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", dbType: DbType.Date, valueTypeEqualsFieldType: false); [Test] - public Task Daterange_as_NpgsqlRange_of_DateOnly() - => AssertType( + public async Task Daterange_as_NpgsqlRange_of_DateOnly() + { + await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", - NpgsqlDbType.DateRange, - isDefaultForReading: false); -#endif + valueTypeEqualsFieldType: false, + skipArrayCheck: true); + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{"[2002-03-04,2002-03-06)"}""", + "daterange[]", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{[2002-03-04,2002-03-06)}""", + "datemultirange", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + 
skipArrayCheck: true); + } [Test] public async Task Daterange_array_as_array_of_DateInterval() @@ -506,10 +588,8 @@ await AssertType( new DateInterval(new(2002, 3, 4), new(2002, 3, 5)), new DateInterval(new(2002, 3, 8), new(2002, 3, 10)) }, - @"{""[2002-03-04,2002-03-06)"",""[2002-03-08,2002-03-11)""}", - "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, - isDefaultForWriting: false); + """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", + "daterange[]", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -523,10 +603,8 @@ await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, - @"{""[2002-03-04,2002-03-06)"",""[2002-03-08,2002-03-11)""}", - "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, - isDefault: false); + """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", + "daterange[]", dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } #endregion Date @@ -535,29 +613,25 @@ await AssertType( [Test] public Task Time_as_LocalTime() - => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", "time without time zone", NpgsqlDbType.Time, DbType.Time); + => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", + "time without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.Time, DbType.Object)); [Test] public Task Time_as_TimeSpan() => AssertType( new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", - "time without time zone", - NpgsqlDbType.Time, - DbType.Time, - isDefault: false); + "time without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Time, DbType.Object), valueTypeEqualsFieldType: false); -#if NET6_0_OR_GREATER [Test] public Task Time_as_TimeOnly() => AssertType( new TimeOnly(10, 45, 34, 500), "10:45:34.5", - "time without time zone", - NpgsqlDbType.Time, - DbType.Time, - isDefaultForReading: false); -#endif + "time without time 
zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.Time, valueTypeEqualsFieldType: false); #endregion Time @@ -569,7 +643,7 @@ public Task TimeTz_as_OffsetTime() new OffsetTime(new LocalTime(1, 2, 3, 4).PlusNanoseconds(5000), Offset.FromHoursAndMinutes(3, 30) + Offset.FromSeconds(5)), "01:02:03.004005+03:30:05", "time with time zone", - NpgsqlDbType.TimeTz); + dataTypeInference: DataTypeInference.Nothing); [Test] public async Task TimeTz_as_DateTimeOffset() @@ -577,14 +651,13 @@ public async Task TimeTz_as_DateTimeOffset() await AssertTypeRead( "13:03:45.51+02", "time with time zone", - new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), isDefault: false); + new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), valueTypeEqualsFieldType: false); await AssertTypeWrite( new DateTimeOffset(1, 1, 1, 13, 3, 45, 510, TimeSpan.FromHours(2)), "13:03:45.51+02", - "time with time zone", - NpgsqlDbType.TimeTz, - isDefault: false); + "time with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.DateTime)); } #endregion Time with time zone @@ -608,7 +681,7 @@ public Task Interval_as_Period() }.Build().Normalize(), "1 year 2 mons 25 days 05:06:07.008009", "interval", - NpgsqlDbType.Interval); + dataTypeInference: DataTypeInference.Nothing); [Test] public Task Interval_as_Duration() @@ -616,45 +689,87 @@ public Task Interval_as_Duration() Duration.FromDays(5) + Duration.FromMinutes(4) + Duration.FromSeconds(3) + Duration.FromMilliseconds(2) + Duration.FromNanoseconds(1000), "5 days 00:04:03.002001", - "interval", - NpgsqlDbType.Interval, - isDefaultForReading: false); + "interval", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] - public Task Interval_as_Duration_with_months_fails() - => AssertTypeUnsupportedRead("2 months", "interval"); + public async Task Interval_as_Duration_with_months_fails() + { + var exception = await 
AssertTypeUnsupportedRead("2 months", "interval"); + Assert.That(exception.Message, Is.EqualTo(NpgsqlNodaTimeStrings.CannotReadIntervalWithMonthsAsDuration)); + } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3438")] public async Task Bug3438() { await using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var expected = Duration.FromSeconds(2148); cmd.Parameters.Add(new NpgsqlParameter("p1", NpgsqlDbType.Interval) { Value = expected }); cmd.Parameters.AddWithValue("p2", expected); - using var reader = cmd.ExecuteReader(); - reader.Read(); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); for (var i = 0; i < 2; i++) { Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(Period))); } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5867")] + public async Task Normalize_period_on_write() + { + var value = Period.FromTicks(-3675048768766); + var expected = value.Normalize(); + var expectedAfterRoundtripBuilder = expected.ToBuilder(); + // Postgres doesn't support nanoseconds, trim them to microseconds + expectedAfterRoundtripBuilder.Nanoseconds -= expected.Nanoseconds % 1000; + var expectedAfterRoundtrip = expectedAfterRoundtripBuilder.Build(); + + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); + cmd.Parameters.AddWithValue(value); + cmd.Parameters.AddWithValue(expected); + + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var dbValue = reader.GetFieldValue(0); + var dbExpected = reader.GetFieldValue(1); + + Assert.That(dbValue, Is.EqualTo(dbExpected)); + Assert.That(dbValue, Is.EqualTo(expectedAfterRoundtrip)); + } + + [Test] + public async Task Period_write_throw_on_overflow() + { + var periodBuilder = new PeriodBuilder + { + Years = int.MaxValue + }; + var ex = await 
AssertTypeUnsupportedWrite(periodBuilder.Build(), "interval"); + Assert.That(ex.Message, Is.EqualTo(NpgsqlNodaTimeStrings.CannotWritePeriodDueToOverflow)); + Assert.That(ex.InnerException, Is.TypeOf()); + } + #endregion Interval #region Support - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override NpgsqlDataSource DataSource { get; } + + public NodaTimeTests() { - var conn = await base.OpenConnectionAsync(connectionString); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; + var builder = CreateDataSourceBuilder(); + builder.UseNodaTime(); + builder.ConnectionStringBuilder.Options = "-c TimeZone=Europe/Berlin"; + DataSource = builder.Build(); } - protected override NpgsqlConnection OpenConnection(string? connectionString = null) - => throw new NotSupportedException(); + public void Dispose() + => DataSource.Dispose(); #endregion Support } diff --git a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj index b7e0b21a09..a5d594024d 100644 --- a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj +++ b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj @@ -1,11 +1,20 @@ - + + + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + diff --git a/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj b/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj index 466d6550dd..268d891b33 100644 --- a/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj +++ b/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj @@ -1,4 +1,4 @@ - + @@ -10,4 +10,8 @@ + + + $(NoWarn);xUnit1004 + diff --git a/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs b/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs index c92cd069f9..ea72e86bcf 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs @@ -1,16 +1,33 
@@ using AdoNet.Specification.Tests; +using Xunit; +using Xunit.Sdk; namespace Npgsql.Specification.Tests; -public sealed class NpgsqlCommandTests : CommandTestBase +public sealed class NpgsqlCommandTests(NpgsqlDbFactoryFixture fixture) : CommandTestBase(fixture) { - public NpgsqlCommandTests(NpgsqlDbFactoryFixture fixture) - : base(fixture) + public override void ExecuteReader_throws_when_transaction_required() { + // PostgreSQL only supports a single transaction on a given connection at a given time. As a result, + // Npgsql completely ignores DbCommand.Transaction. + var ex = Assert.Throws(() => base.ExecuteReader_throws_when_transaction_required()); + Assert.Contains("No exception was thrown", ex.Message); } - // PostgreSQL only supports a single transaction on a given connection at a given time. As a result, - // Npgsql completely ignores DbCommand.Transaction. - public override void ExecuteReader_throws_when_transaction_required() {} - public override void ExecuteReader_throws_when_transaction_mismatched() {} -} \ No newline at end of file + public override void ExecuteReader_throws_when_transaction_mismatched() + { + // PostgreSQL only supports a single transaction on a given connection at a given time. As a result, + // Npgsql completely ignores DbCommand.Transaction. + var ex = Assert.Throws(() => base.ExecuteReader_throws_when_transaction_mismatched()); + Assert.Contains("No exception was thrown", ex.Message); + } + + // Skipped tests mark places where Npgsql currently diverges from AdoNet.Specification.Tests expectations. + // Some divergences may be by design; others may indicate compatibility gaps worth investigating. 
+ + [Fact(Skip = "NpgsqlCommand.ExecuteReader() throws NpgsqlOperationInProgressException instead of InvalidOperationException when another reader is already open")] + public override void ExecuteReader_throws_when_reader_open() {} + + [Fact(Skip = "NpgsqlCommand.Execute() throws InvalidCastException instead of NotSupportedException for unknown ParameterValue types")] + public override void Execute_throws_for_unknown_ParameterValue_type() {} +} diff --git a/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs b/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs index fa71ea0f2f..b103128329 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs @@ -1,11 +1,20 @@ +using System.Threading.Tasks; using AdoNet.Specification.Tests; +using Xunit; namespace Npgsql.Specification.Tests; -public sealed class NpgsqlConnectionTests : ConnectionTestBase +public sealed class NpgsqlConnectionTests(NpgsqlDbFactoryFixture fixture) : ConnectionTestBase(fixture) { - public NpgsqlConnectionTests(NpgsqlDbFactoryFixture fixture) - : base(fixture) - { - } -} \ No newline at end of file + // Skipped tests mark places where Npgsql currently diverges from AdoNet.Specification.Tests expectations. + // Some divergences may be by design; others may indicate compatibility gaps worth investigating. 
+ + [Fact(Skip = "NpgsqlConnection does not support the Disposed event")] + public override void Dispose_raises_Disposed() {} + + [Fact(Skip = "NpgsqlConnection does not support the Disposed event")] + public override Task DisposeAsync_raises_Disposed() => Task.CompletedTask; + + [Fact(Skip = "NpgsqlConnection.OpenAsync() does not throw OperationCanceledException when a canceled token is passed")] + public override Task OpenAsync_is_canceled() => Task.CompletedTask; +} diff --git a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs index 45bfb1a197..a58546af73 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs @@ -1,10 +1,50 @@ +using System.Threading.Tasks; using AdoNet.Specification.Tests; using Xunit; namespace Npgsql.Specification.Tests; -public sealed class NpgsqlDataReaderTests : DataReaderTestBase +public sealed class NpgsqlDataReaderTests(NpgsqlSelectValueFixture fixture) : DataReaderTestBase(fixture) { - public NpgsqlDataReaderTests(NpgsqlSelectValueFixture fixture) - : base(fixture) {} -} \ No newline at end of file + // Skipped tests mark places where Npgsql currently diverges from AdoNet.Specification.Tests expectations. + // Some divergences may be by design; others may indicate compatibility gaps worth investigating. 
+ + [Fact(Skip = "NpgsqlDataReader.FieldCount throws ObjectDisposedException instead of InvalidOperationException")] + public override void FieldCount_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetBytes() throws ArgumentOutOfRangeException instead of returning 0 when dataOffset is too large")] + public override void GetBytes_reads_nothing_when_dataOffset_is_too_large() {} + + [Fact(Skip = "NpgsqlDataReader.GetChars() throws EndOfStreamException instead of returning 0 when dataOffset is too large")] + public override void GetChars_reads_nothing_when_dataOffset_is_too_large() {} + + [Fact(Skip = "NpgsqlDataReader.GetDataTypeName() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetDataTypeName_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetFieldType() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetFieldType_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetFieldValueAsync() does not throw an OperationCanceledException when a canceled token is passed")] + public override Task GetFieldValueAsync_is_canceled() => Task.CompletedTask; + + [Fact(Skip = "NpgsqlDataReader.GetName() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetName_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetTextReader() throws InvalidCastException when command text is null")] + public override void GetTextReader_returns_empty_for_null_String() {} + + [Fact(Skip = "NpgsqlDataReader.GetValue() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetValue_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.IsDBNull() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void IsDBNull_throws_when_closed() {} 
+ + [Fact(Skip = "NpgsqlDataReader.IsDBNullAsync() does not throw OperationCanceledException when a canceled token is passed")] + public override Task IsDBNullAsync_is_canceled() => Task.CompletedTask; + + [Fact(Skip = "NpgsqlDataReader.NextResult() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void NextResult_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.Read() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void Read_throws_when_closed() {} +} diff --git a/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs b/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs index 67f1d9f1b4..06bdb837f2 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs @@ -10,8 +10,7 @@ namespace Npgsql.Specification.Tests; public class NpgsqlSelectValueFixture : NpgsqlDbFactoryFixture, ISelectValueFixture, IDeleteFixture, IDisposable { public NpgsqlSelectValueFixture() - { - Utility.ExecuteNonQuery(this, @" + => Utility.ExecuteNonQuery(this, @" DROP TABLE IF EXISTS select_value; CREATE TABLE select_value ( @@ -39,7 +38,6 @@ INSERT INTO select_value VALUES (4, NULL, false, '0001-01-01', '0001-01-01', '0001-01-01', 0.000000000000001, 2.23e-308, '33221100-5544-7766-9988-aabbccddeeff', -32768, -2147483648, -9223372036854775808, 1.18e-38, NULL, '00:00:00'), (5, NULL, true, '9999-12-31', '9999-12-31 23:59:59.999', '9999-12-31 23:59:59.999 +14:00', 99999999999999999999.999999999999999, 1.79e308, 'ccddeeff-aabb-8899-7766-554433221100', 32767, 2147483647, 9223372036854775807, 3.40e38, NULL, '23:59:59.999'); "); - } public void Dispose() => Utility.ExecuteNonQuery(this, "DROP TABLE IF EXISTS select_value;"); @@ -51,8 +49,7 @@ public string CreateSelectSql(byte[] value) => public string SelectNoRows => "SELECT 1 WHERE 0 = 1;"; - public IReadOnlyCollection 
SupportedDbTypes { get; } = new ReadOnlyCollection(new[] - { + public IReadOnlyCollection SupportedDbTypes { get; } = new ReadOnlyCollection([ DbType.Binary, DbType.Boolean, DbType.Date, @@ -67,9 +64,9 @@ public string CreateSelectSql(byte[] value) => DbType.Single, DbType.String, DbType.Time - }); + ]); public Type NullValueExceptionType => typeof(InvalidCastException); public string DeleteNoRows => "DELETE FROM select_value WHERE 1 = 0"; -} \ No newline at end of file +} diff --git a/test/Npgsql.Specification.Tests/Utility.cs b/test/Npgsql.Specification.Tests/Utility.cs index 9e91767d55..51bdc18dcd 100644 --- a/test/Npgsql.Specification.Tests/Utility.cs +++ b/test/Npgsql.Specification.Tests/Utility.cs @@ -1,4 +1,3 @@ -using System; using AdoNet.Specification.Tests; namespace Npgsql.Specification.Tests; diff --git a/test/Npgsql.Tests/App.config b/test/Npgsql.Tests/App.config index dcd0a07c0b..e9e8771144 100644 --- a/test/Npgsql.Tests/App.config +++ b/test/Npgsql.Tests/App.config @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Tests/AsyncTests.cs b/test/Npgsql.Tests/AsyncTests.cs index 3d7ebc3300..591b94e0ca 100644 --- a/test/Npgsql.Tests/AsyncTests.cs +++ b/test/Npgsql.Tests/AsyncTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; using System.Data; using System.Threading.Tasks; using static Npgsql.Tests.TestUtil; diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 5231ff03c0..a3765d41ae 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -7,33 +7,28 @@ using Npgsql.Properties; using Npgsql.Tests.Support; using NUnit.Framework; -using static Npgsql.Util.Statics; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; -public class AuthenticationTests : MultiplexingTestBase +public class AuthenticationTests : TestBase { [Test] [NonParallelizable] // Sets environment variable public async Task Connect_UserNameFromEnvironment_Succeeds() { - var 
builder = new NpgsqlConnectionStringBuilder(ConnectionString) { IntegratedSecurity = false }; - using var _ = SetEnvironmentVariable("PGUSER", builder.Username); - builder.Username = null; - using var __ = CreateTempPool(builder.ConnectionString, out var connectionString); - using var ___ = await OpenConnectionAsync(connectionString); + using var _ = SetEnvironmentVariable("PGUSER", new NpgsqlConnectionStringBuilder(ConnectionString).Username); + await using var dataSource = CreateDataSource(csb => csb.Username = null); + await using var __ = await dataSource.OpenConnectionAsync(); } [Test] [NonParallelizable] // Sets environment variable public async Task Connect_PasswordFromEnvironment_Succeeds() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { IntegratedSecurity = false }; - using var _ = SetEnvironmentVariable("PGPASSWORD", builder.Password); - builder.Password = null; - using var __ = CreateTempPool(builder.ConnectionString, out var connectionString); - using var ___ = await OpenConnectionAsync(connectionString); + using var _ = SetEnvironmentVariable("PGPASSWORD", new NpgsqlConnectionStringBuilder(ConnectionString).Password); + await using var dataSource = CreateDataSource(csb => csb.Passfile = null); + await using var __ = await dataSource.OpenConnectionAsync(); } [Test] @@ -52,6 +47,38 @@ public async Task Set_Password_on_NpgsqlDataSource() await using var connection2 = dataSource.OpenConnection(); } + [Test] + public async Task Password_provider([Values]bool async) + { + var dataSourceBuilder = GetPasswordlessDataSourceBuilder(); + var password = new NpgsqlConnectionStringBuilder(TestUtil.ConnectionString).Password!; + var syncProviderCalled = false; + var asyncProviderCalled = false; + dataSourceBuilder.UsePasswordProvider(_ => + { + syncProviderCalled = true; + return password; + }, (_,_) => + { + asyncProviderCalled = true; + return new(password); + }); + + using var dataSource = dataSourceBuilder.Build(); + using var conn = async 
? await dataSource.OpenConnectionAsync() : dataSource.OpenConnection(); + Assert.That(async ? asyncProviderCalled : syncProviderCalled, "Password_provider not used"); + } + + [Test] + public void Password_provider_exception() + { + var dataSourceBuilder = GetPasswordlessDataSourceBuilder(); + dataSourceBuilder.UsePasswordProvider(_ => throw new Exception(), (_,_) => throw new Exception()); + + using var dataSource = dataSourceBuilder.Build(); + Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync()); + } + [Test] public async Task Periodic_password_provider() { @@ -134,6 +161,17 @@ public void Both_password_and_password_provider_is_not_supported() .With.Message.EqualTo(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword)); } + [Test] + public void Multiple_password_providers_is_not_supported() + { + var dataSourceBuilder = new NpgsqlDataSourceBuilder(TestUtil.ConnectionString); + dataSourceBuilder + .UsePeriodicPasswordProvider((_, _) => new("foo"), TimeSpan.FromMinutes(1), TimeSpan.FromSeconds(10)) + .UsePasswordProvider(_ => "foo", (_,_) => new("foo")); + Assert.That(() => dataSourceBuilder.Build(), Throws.Exception.TypeOf() + .With.Message.EqualTo(NpgsqlStrings.CannotSetMultiplePasswordProviderKinds)); + } + #region pgpass [Test] @@ -142,18 +180,17 @@ public async Task Use_pgpass_from_connection_string() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - - var password = builder.Password; - builder.Password = null; - var passFile = Path.GetTempFileName(); - File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); - builder.Passfile = passFile; + File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{builder.Password}"); try { - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => + { + 
csb.Passfile = null; + csb.Passfile = passFile; + }); + await using var conn = await dataSource.OpenConnectionAsync(); } finally { @@ -167,18 +204,14 @@ public async Task Use_pgpass_from_environment_variable() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - - var password = builder.Password; - builder.Password = null; - var passFile = Path.GetTempFileName(); - File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); + File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{builder.Password}"); using var passFileVariable = SetEnvironmentVariable("PGPASSFILE", passFile); try { - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => csb.Password = null); + await using var conn = await dataSource.OpenConnectionAsync(); } finally { @@ -191,10 +224,6 @@ public async Task Use_pgpass_from_environment_variable() public async Task Use_pgpass_from_homedir() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - - var password = builder.Password; - builder.Password = null; string? 
dirToDelete = null; string passFile; @@ -222,9 +251,10 @@ public async Task Use_pgpass_from_homedir() try { - File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + var builder = new NpgsqlConnectionStringBuilder(ConnectionString); + File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{builder.Password}"); + await using var dataSource = CreateDataSource(csb => csb.Passfile = null); + await using var conn = await dataSource.OpenConnectionAsync(); } finally { @@ -243,8 +273,8 @@ public async Task Use_pgpass_from_homedir() public void Password_source_precedence() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); + var builder = new NpgsqlConnectionStringBuilder(ConnectionString); var password = builder.Password; var passwordBad = password + "_bad"; @@ -257,52 +287,73 @@ public void Password_source_precedence() File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); File.WriteAllText(passFileBad, $"*:*:*:{builder.Username}:{passwordBad}"); - using (var passFileVariable = SetEnvironmentVariable("PGPASSFILE", passFileBad)) + using (SetEnvironmentVariable("PGPASSFILE", passFileBad)) { // Password from the connection string goes first - using (var passwordVariable = SetEnvironmentVariable("PGPASSWORD", passwordBad)) - Assert.That(OpenConnection(password, passFileBad), Throws.Nothing); + using (SetEnvironmentVariable("PGPASSWORD", passwordBad)) + { + using var dataSource1 = CreateDataSource(csb => + { + csb.Password = password; + csb.Passfile = passFileBad; + }); + + Assert.That(() => dataSource1.OpenConnection(), Throws.Nothing); + } // Password from the environment variable goes second - using (var passwordVariable = SetEnvironmentVariable("PGPASSWORD", password)) - Assert.That(OpenConnection(password: 
null, passFileBad), Throws.Nothing); + using (SetEnvironmentVariable("PGPASSWORD", password)) + { + using var dataSource2 = CreateDataSource(csb => + { + csb.Password = null; + csb.Passfile = passFileBad; + }); + + Assert.That(() => dataSource2.OpenConnection(), Throws.Nothing); + } // Passfile from the connection string goes third - Assert.That(OpenConnection(password: null, passFile: passFile), Throws.Nothing); + using var dataSource3 = CreateDataSource(csb => + { + csb.Password = null; + csb.Passfile = passFile; + }); + + Assert.That(() => dataSource3.OpenConnection(), Throws.Nothing); } // Passfile from the environment variable goes fourth - using (var passFileVariable = SetEnvironmentVariable("PGPASSFILE", passFile)) - Assert.That(OpenConnection(password: null, passFile: null), Throws.Nothing); - - Func OpenConnection(string? password, string? passFile) => async () => + using (SetEnvironmentVariable("PGPASSFILE", passFile)) { - builder.Password = password; - builder.Passfile = passFile; - builder.IntegratedSecurity = false; - builder.ApplicationName = $"{nameof(Password_source_precedence)}:{Guid.NewGuid()}"; + using var dataSource4 = CreateDataSource(csb => + { + csb.Password = null; + csb.Passfile = null; + }); - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var connection = await OpenConnectionAsync(connectionString); - }; + Assert.That(() => dataSource4.OpenConnection(), Throws.Nothing); + } + + static DeferDisposable Defer(Action action) => new(action); + } + + readonly struct DeferDisposable(Action action) : IDisposable + { + public void Dispose() => action(); } [Test, Description("Connects with a bad password to ensure the proper error is thrown")] public void Authentication_failure() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Password = "bad" - }; - using (CreateTempPool(builder, out var connectionString)) - using (var conn = new NpgsqlConnection(connectionString)) - { - 
Assert.That(() => conn.OpenAsync(), Throws.Exception - .TypeOf() - .With.Property(nameof(PostgresException.SqlState)).StartsWith("28") - ); - Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Closed)); - } + using var dataSource = CreateDataSource(csb => csb.Password = "bad"); + using var conn = dataSource.CreateConnection(); + + Assert.That(() => conn.OpenAsync(), Throws.Exception + .TypeOf() + .With.Property(nameof(PostgresException.SqlState)).StartsWith("28") + ); + Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Closed)); } [Test, Description("Simulates a timeout during the authentication phase")] @@ -311,15 +362,14 @@ public async Task Timeout_during_authentication() { var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { Timeout = 1 }; await using var postmasterMock = new PgPostmasterMock(builder.ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - - var __ = postmasterMock.AcceptServer(); + _ = postmasterMock.AcceptServer(); // The server will accept a connection from the client, but will not respond to the client's authentication // request. 
This should trigger a timeout - Assert.That(async () => await OpenConnectionAsync(connectionString), - Throws.Exception.TypeOf() - .With.InnerException.TypeOf()); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var connection = dataSource.CreateConnection(); + var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync()); + Assert.That(ex.InnerException, Is.TypeOf()); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1180")] @@ -341,8 +391,11 @@ public void Pool_by_password() public async Task AuthenticateIntegratedSecurity() { await using var dataSource = NpgsqlDataSource.Create(new NpgsqlConnectionStringBuilder(ConnectionString) - { IntegratedSecurity = true, Username = null, Password = null }); - await using var c = await dataSource.OpenConnectionAsync(); + { + Username = null, + Password = null + }); + await using var c = await dataSource.OpenConnectionAsync(); Assert.That(c.State, Is.EqualTo(ConnectionState.Open)); } @@ -364,16 +417,14 @@ public async Task ProvidePasswordCallback_is_used() using (var conn = new NpgsqlConnection(builder.ConnectionString) { ProvidePasswordCallback = ProvidePasswordCallback }) { conn.Open(); - Assert.True(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); + Assert.That(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); - // Do this again, since with multiplexing the very first connection attempt is done via - // the non-multiplexing path, to surface any exceptions. 
NpgsqlConnection.ClearPool(conn); conn.Close(); getPasswordDelegateWasCalled = false; conn.Open(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); - Assert.True(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); + Assert.That(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); } string ProvidePasswordCallback(string host, int port, string database, string username) @@ -392,8 +443,6 @@ public void ProvidePasswordCallback_is_not_used() { conn.Open(); - // Do this again, since with multiplexing the very first connection attempt is done via - // the non-multiplexing path, to surface any exceptions. NpgsqlConnection.ClearPool(conn); conn.Close(); conn.Open(); @@ -446,10 +495,10 @@ public void ProvidePasswordCallback_gets_correct_arguments() using (var conn = new NpgsqlConnection(builder.ConnectionString) { ProvidePasswordCallback = ProvidePasswordCallback }) { conn.Open(); - Assert.AreEqual(builder.Host, receivedHost); - Assert.AreEqual(builder.Port, receivedPort); - Assert.AreEqual(builder.Database, receivedDatabase); - Assert.AreEqual(builder.Username, receivedUsername); + Assert.That(receivedHost, Is.EqualTo(builder.Host)); + Assert.That(receivedPort, Is.EqualTo(builder.Port)); + Assert.That(receivedDatabase, Is.EqualTo(builder.Database)); + Assert.That(receivedUsername, Is.EqualTo(builder.Username)); } string ProvidePasswordCallback(string host, int port, string database, string username) @@ -475,6 +524,4 @@ NpgsqlDataSourceBuilder GetPasswordlessDataSourceBuilder() Password = null } }; - - public AuthenticationTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index c81affc542..b7adfc8b10 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using NUnit.Framework; using System; using 
System.Data; @@ -13,14 +13,12 @@ public class AutoPrepareTests : TestBase [Test] public void Basic() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); checkCmd.Prepare(); @@ -49,14 +47,12 @@ public void Basic() [Test, Description("Passes the maximum limit for autoprepared statements, recycling the least-recently used one")] public void Recycle() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); checkCmd.Prepare(); @@ -92,15 +88,13 @@ public void Recycle() [Test] public void Persist() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); - using var _ = CreateTempPool(csb, out var connString); - - using (var conn = OpenConnection(connString)) + using (var conn = dataSource.OpenConnection()) using (var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn)) { checkCmd.Prepare(); @@ -110,7 +104,7 @@ public void Persist() // We now have two prepared statements which should be persisted - using (var conn = OpenConnection(connString)) + using (var conn = dataSource.OpenConnection()) using (var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn)) { 
checkCmd.Prepare(); @@ -127,14 +121,12 @@ public void Persist() [Test] public async Task Positional_parameter() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 2 - }; - - await using var conn = await OpenConnectionAsync(csb); - conn.UnprepareAll(); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 2; + }); + await using var conn = await dataSource.OpenConnectionAsync(); await using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); await checkCmd.PrepareAsync(); @@ -153,13 +145,12 @@ public async Task Positional_parameter() [Test] public void Promote_auto_to_explicit() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); using var cmd1 = new NpgsqlCommand("SELECT 1", conn); using var cmd2 = new NpgsqlCommand("SELECT 1", conn); @@ -177,18 +168,21 @@ public void Promote_auto_to_explicit() // cmd1's statement is no longer valid (has been closed), make sure it still works (will run unprepared) cmd2.ExecuteScalar(); + + // Trigger autoprepare on a different query to confirm we didn't leave replaced statement in a bad state + using var cmd3 = new NpgsqlCommand("SELECT 2", conn); + cmd3.ExecuteNonQuery(); cmd3.ExecuteNonQuery(); } [Test] public void Candidate_eject() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 3 - }; - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 3; + }); + using var 
conn = dataSource.OpenConnection(); using var cmd = conn.CreateCommand(); for (var i = 0; i < PreparedStatementManager.CandidateCount; i++) @@ -223,13 +217,12 @@ public void Candidate_eject() [Test] public void One_command_same_sql_twice() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 1; SELECT 1; SELECT 1", conn); //cmd.Prepare(); //Assert.That(cmd.IsPrepared, Is.True); @@ -240,14 +233,13 @@ public void One_command_same_sql_twice() [Test] public void Across_close_open_different_connector() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var _ = CreateTempPool(csb, out var connString); - using var conn1 = new NpgsqlConnection(connString); - using var conn2 = new NpgsqlConnection(connString); + using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn1 = dataSource.CreateConnection(); + using var conn2 = dataSource.CreateConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); conn1.Open(); cmd.ExecuteNonQuery(); cmd.ExecuteNonQuery(); @@ -266,14 +258,12 @@ public void Across_close_open_different_connector() [Test] public void Unprepare_all() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn); 
cmd.Prepare(); // Explicit conn.ExecuteNonQuery("SELECT 2"); conn.ExecuteNonQuery("SELECT 2"); // Auto @@ -285,14 +275,12 @@ public void Unprepare_all() [Test, Description("Prepares the same SQL with different parameters (overloading)")] public void Overloaded_sql() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { cmd.Parameters.AddWithValue("p", NpgsqlDbType.Integer, 8); @@ -319,14 +307,12 @@ public void Derive_parameters_for_auto_prepared_statement() { const string query = "SELECT @p::integer"; const int answer = 42; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); using var cmd = new NpgsqlCommand(query, conn); checkCmd.Prepare(); @@ -351,12 +337,12 @@ public void Derive_parameters_for_auto_prepared_statement() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2644")] public void Row_description_properly_cloned() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnection(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); conn.UnprepareAll(); using var cmd1 = new NpgsqlCommand("SELECT 1 AS foo", conn); using var cmd2 = new 
NpgsqlCommand("SELECT 1 AS bar", conn); @@ -371,55 +357,47 @@ public void Row_description_properly_cloned() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3106")] public async Task Dont_auto_prepare_more_than_max_statements_in_batch() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 50, - }; + const int maxAutoPrepare = 50; - await using var connection = await OpenConnectionAsync(builder); - connection.UnprepareAll(); + await using var dataSource = CreateDataSource(csb => csb.MaxAutoPrepare = maxAutoPrepare); + await using var connection = await dataSource.OpenConnectionAsync(); for (var i = 0; i < 100; i++) { - using var command = connection.CreateCommand(); + await using var command = connection.CreateCommand(); command.CommandText = string.Join("", Enumerable.Range(0, 100).Select(n => $"SELECT {n};")); await command.ExecuteNonQueryAsync(); } - Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(builder.MaxAutoPrepare)); + Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(maxAutoPrepare)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3106")] public async Task Dont_auto_prepare_more_than_max_statements_in_batch_random() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 10, - }; + const int maxAutoPrepare = 10; - await using var connection = await OpenConnectionAsync(builder); - connection.UnprepareAll(); + await using var dataSource = CreateDataSource(csb => csb.MaxAutoPrepare = maxAutoPrepare); + await using var connection = await dataSource.OpenConnectionAsync(); var random = new Random(1); for (var i = 0; i < 100; i++) { - using var command = connection.CreateCommand(); + await using var command = connection.CreateCommand(); command.CommandText = string.Join("", Enumerable.Range(0, 100).Select(n => $"SELECT {random.Next(200)};")); await 
command.ExecuteNonQueryAsync(); } - Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(builder.MaxAutoPrepare)); + Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(maxAutoPrepare)); } [Test] public async Task Replace_and_execute_within_same_batch() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 1, - AutoPrepareMinUsages = 2 - }; - - await using var connection = await OpenConnectionAsync(builder); - connection.UnprepareAll(); + csb.MaxAutoPrepare = 1; + csb.AutoPrepareMinUsages = 2; + }); + await using var connection = await dataSource.OpenConnectionAsync(); for (var i = 0; i < 2; i++) await connection.ExecuteNonQueryAsync("SELECT 1"); @@ -429,26 +407,26 @@ public async Task Replace_and_execute_within_same_batch() } // Exclude some internal Npgsql queries which include pg_type as well as the count statement itself - const string CountPreparedStatements = @" + const string CountPreparedStatements = """ SELECT COUNT(*) FROM pg_prepared_statements - WHERE statement NOT LIKE '%pg_prepared_statements%' - AND statement NOT LIKE '%pg_type%'"; +WHERE statement NOT LIKE '%pg_prepared_statements%' +AND statement NOT LIKE '%pg_type%' +"""; [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2665")] public async Task Auto_prepared_command_failure() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - await using var conn = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var conn = await dataSource.OpenConnectionAsync(); var tableName = await GetTempTableName(conn); - conn.UnprepareAll(); await conn.ExecuteNonQueryAsync($"CREATE TABLE {tableName} (id integer)"); - using (var command = new 
NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) + await using (var command = new NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) { await command.ExecuteNonQueryAsync(); await conn.ExecuteNonQueryAsync($"DROP TABLE {tableName}"); @@ -457,7 +435,7 @@ public async Task Auto_prepared_command_failure() await conn.ExecuteNonQueryAsync($"CREATE TABLE {tableName} (id integer)"); - using (var command = new NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) + await using (var command = new NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) { await command.ExecuteNonQueryAsync(); await command.ExecuteNonQueryAsync(); @@ -467,14 +445,12 @@ public async Task Auto_prepared_command_failure() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3002")] public void Replace_with_bad_sql() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 2, - AutoPrepareMinUsages = 1 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 2; + csb.AutoPrepareMinUsages = 1; + }); + using var conn = dataSource.OpenConnection(); conn.ExecuteNonQuery("SELECT 1"); conn.ExecuteNonQuery("SELECT 2"); @@ -500,21 +476,21 @@ public void Replace_with_bad_sql() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4082")] public async Task Batch_statement_execution_error_cleanup() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 2, - AutoPrepareMinUsages = 1 - }; - - await using var conn = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 2; + csb.AutoPrepareMinUsages = 1; + }); + await using var conn = await dataSource.OpenConnectionAsync(); var funcName = await GetTempFunctionName(conn); // Create a function we can use to raise an error with a single statement - conn.ExecuteNonQuery(@$" - CREATE OR REPLACE FUNCTION 
{funcName}() RETURNS VOID AS - 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' - LANGUAGE 'plpgsql'; - "); + await conn.ExecuteNonQueryAsync( +$""" +CREATE OR REPLACE FUNCTION {funcName}() RETURNS VOID AS + 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' +LANGUAGE 'plpgsql'; +"""); conn.UnprepareAll(); @@ -542,35 +518,92 @@ public async Task Batch_statement_execution_error_cleanup() Assert.That(await conn.ExecuteScalarAsync("SELECT 3"), Is.EqualTo(3)); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4404")] + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4404"), IssueLink("https://github.com/npgsql/npgsql/issues/5220")] public async Task SchemaOnly() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 10, - }; - - using var _ = CreateTempPool(csb, out var connString); - await using var conn = await OpenConnectionAsync(connString); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 10; + }); + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT 1", conn); for (var i = 0; i < 5; i++) { await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); } + + // Make sure there is no protocol desync due to #5220 + await cmd.ExecuteScalarAsync(); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6038")] + public async Task Auto_prepared_schema_only_correct_schema() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 1; + csb.AutoPrepareMinUsages = 5; + }); + await using var connection = await dataSource.OpenConnectionAsync(); + var table1 = await CreateTempTable(connection, "foo int"); + var table2 = await CreateTempTable(connection, "bar int"); + + await using var cmd = connection.CreateCommand(); + 
cmd.CommandText = $"SELECT * FROM {table1}"; + for (var i = 0; i < 5; i++) + { + // Make sure we prepare the first query + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + } + + cmd.CommandText = $"SELECT * FROM {table2}"; + // The second query will load RowDescription, which is a singleton on NpgsqlConnector + // This shouldn't affect the first query, because we create a copy of RowDescription on prepare + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + + cmd.CommandText = $"SELECT * FROM {table1}"; + // If we indeed made a copy of RowDescription on prepare, we should get the column for the first query and not for the second + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); + var columns = await reader.GetColumnSchemaAsync(); + Assert.That(columns.Count, Is.EqualTo(1)); + Assert.That(columns[0].ColumnName, Is.EqualTo("foo")); } [Test] - public async Task Auto_prepared_statement_invalidation() + public async Task Auto_prepared_schema_only_replace() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 1; + csb.AutoPrepareMinUsages = 5; + }); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT 1"; + for (var i = 0; i < 5; i++) + { + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + } + + cmd.CommandText = "SELECT 2"; + for (var i = 0; i < 5; i++) { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + } + } - await using var connection = await OpenConnectionAsync(csb); + [Test] + public async Task Auto_prepared_statement_invalidation() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 
2; + }); + await using var connection = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(connection, "foo int"); await using var command = new NpgsqlCommand($"SELECT * FROM {table}", connection); @@ -587,6 +620,33 @@ public async Task Auto_prepared_statement_invalidation() Assert.DoesNotThrowAsync(() => command.ExecuteNonQueryAsync()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6432")] + public async Task Reuse_batch_with_different_connectors() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var batch = new NpgsqlBatch(); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 1")); + await using (var connection = await dataSource.OpenConnectionAsync()) + { + batch.Connection = connection; + + for (var i = 0; i < 2; i++) + await batch.ExecuteNonQueryAsync(); + } + + dataSource.Clear(); + + await using (var connection = await dataSource.OpenConnectionAsync()) + { + batch.Connection = connection; + await batch.ExecuteNonQueryAsync(); + } + } + void DumpPreparedStatements(NpgsqlConnection conn) { using var cmd = new NpgsqlCommand("SELECT name,statement FROM pg_prepared_statements", conn); diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 342076714a..0a8daccac7 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -1,4 +1,3 @@ -using Npgsql.Util; using NUnit.Framework; using System; using System.Collections.Generic; @@ -9,11 +8,9 @@ namespace Npgsql.Tests; -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.SequentialAccess)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] -public class BatchTests : MultiplexingTestBase +[TestFixture(CommandBehavior.Default)] 
+[TestFixture(CommandBehavior.SequentialAccess)] +public class BatchTests : TestBase, IDisposable { #region Parameters @@ -71,24 +68,6 @@ public async Task Positional_parameters() Assert.That(await reader.NextResultAsync(), Is.False); } - [Test] - public async Task Out_parameters_are_not_allowed() - { - await using var conn = await OpenConnectionAsync(); - await using var batch = new NpgsqlBatch(conn) - { - BatchCommands = - { - new("SELECT @p1") - { - Parameters = { new("p", 8) { Direction = ParameterDirection.InputOutput } } - } - } - }; - - Assert.That(() => batch.ExecuteReaderAsync(Behavior), Throws.Exception.TypeOf()); - } - #endregion Parameters #region NpgsqlBatchCommand @@ -243,6 +222,29 @@ public async Task StatementType_Call() Assert.That(batch.BatchCommands[0].StatementType, Is.EqualTo(StatementType.Call)); } + [Test] + public async Task CommandType_StoredProcedure() + { + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "11.0", "Stored procedures are supported starting with PG 11"); + + var sproc = await GetTempProcedureName(conn); + await conn.ExecuteNonQueryAsync($"CREATE PROCEDURE {sproc}() LANGUAGE sql AS ''"); + + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = { new($"{sproc}") {CommandType = CommandType.StoredProcedure} } + }; + + await using var reader = await batch.ExecuteReaderAsync(Behavior); + + // Consume SELECT result set to parse the CommandComplete + await reader.CloseAsync(); + + Assert.That(batch.BatchCommands[0].StatementType, Is.EqualTo(StatementType.Call)); + } + + [Test] public async Task StatementType_Merge() { @@ -289,6 +291,12 @@ public async Task StatementOID() Assert.That(batch.BatchCommands[1].OID, Is.EqualTo(0)); } + [Test] + public void CanCreateParameter() => Assert.That(new NpgsqlBatchCommand().CanCreateParameter); + + [Test] + public void CreateParameter() => Assert.That(new NpgsqlBatchCommand().CreateParameter(), Is.Not.Null); + #endregion NpgsqlBatchCommand #region 
Command behaviors @@ -466,7 +474,9 @@ public async Task Batch_with_multiple_errors([Values] bool withErrorBarriers) [Test] public async Task Batch_close_dispose_reader_with_multiple_errors([Values] bool withErrorBarriers, [Values] bool dispose) { - await using var conn = await OpenConnectionAsync(); + // Create a temp pool since we dispose the reader (and check the state afterwards) and it can be reused by another connection + await using var dataSource = CreateDataSource(x => x.IncludeFailedBatchedCommand = true); + await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "id INT"); await using var batch = new NpgsqlBatch(conn) @@ -657,7 +667,7 @@ public async Task Empty_batch() } [Test] - public async Task Semicolon_is_not_allowed() + public async Task Semicolon_is_not_allowed_with_no_parameters() { await using var conn = await OpenConnectionAsync(); await using var batch = new NpgsqlBatch(conn) @@ -665,6 +675,24 @@ public async Task Semicolon_is_not_allowed() BatchCommands = { new("SELECT 1; SELECT 2") } }; + Assert.That(() => batch.ExecuteReaderAsync(Behavior), Throws.Exception.TypeOf()); + } + + [Test] + public async Task Semicolon_is_not_allowed_with_named_parameters() + { + await using var conn = await OpenConnectionAsync(); + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = + { + new("SELECT @p1; SELECT 2") + { + Parameters = { new("p1", 1) } + } + } + }; + Assert.That(() => batch.ExecuteReaderAsync(Behavior), Throws.Exception.TypeOf()); } @@ -690,7 +718,7 @@ await conn.ExecuteNonQueryAsync($@" // resources are referenced by the exception above, which is very likely to escape the using statement of the command. 
batch.Dispose(); var cmd2 = conn.CreateBatch(); - Assert.AreNotSame(cmd2, batch); + Assert.That(batch, Is.Not.SameAs(cmd2)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] @@ -711,7 +739,6 @@ await conn.ExecuteNonQueryAsync($@" await using (var reader = await batch.ExecuteReaderAsync(Behavior)) { - var e = Assert.ThrowsAsync(async () => await reader.NextResultAsync())!; Assert.That(e.BatchCommand, Is.SameAs(batch.BatchCommands[1])); } @@ -720,7 +747,7 @@ await conn.ExecuteNonQueryAsync($@" // resources are referenced by the exception above, which is very likely to escape the using statement of the command. batch.Dispose(); var cmd2 = conn.CreateBatch(); - Assert.AreNotSame(cmd2, batch); + Assert.That(batch, Is.Not.SameAs(cmd2)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4202")] @@ -731,101 +758,60 @@ public async Task ExecuteScalar_without_parameters() Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(1)); } - #endregion Miscellaneous - - #region Logging - - [Test] - public async Task Log_ExecuteScalar_single_statement_without_parameters() + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4264")] + public async Task Batch_with_auto_prepare_reuse() { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var dataSource = CreateDataSource(csb => csb.MaxAutoPrepare = 20); await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlBatch(conn) - { - BatchCommands = { new("SELECT 1") } - }; - using (listLoggerProvider.Record()) + var tempTableName = await CreateTempTable(conn, "id int"); + + await using var batch = new NpgsqlBatch(conn); + for (var i = 0; i < 2; ++i) { - await cmd.ExecuteScalarAsync(); + for (var j = 0; j < 10; ++j) + { + batch.BatchCommands.Add(new NpgsqlBatchCommand($"DELETE FROM {tempTableName} WHERE 1=0")); + } + await batch.ExecuteNonQueryAsync(); + batch.BatchCommands.Clear(); } - - var executingCommandEvent = 
listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } - [Test] - public async Task Log_ExecuteScalar_multiple_statements_with_parameters() + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5239")] + public async Task Batch_dispose_reuse() { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var batch = new NpgsqlBatch(conn) + await using var conn = await OpenConnectionAsync(); + NpgsqlBatch firstBatch; + await using (var batch = conn.CreateBatch()) { - BatchCommands = - { - new("SELECT $1") { Parameters = { new() { Value = 8 } } }, - new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } - } - }; + firstBatch = batch; - using (listLoggerProvider.Record()) - { - await batch.ExecuteScalarAsync(); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 1")); + Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(1)); } - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - - // Note: the message formatter of Microsoft.Extensions.Logging doesn't seem to handle arrays inside tuples, so we get the - // following ugliness (https://github.com/dotnet/runtime/issues/63165). Serilog handles this fine. 
- Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, 9, System.Object[])]")); - AssertLoggingStateDoesNotContain(executingCommandEvent, "CommandText"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + await using (var batch = conn.CreateBatch()) + { + Assert.That(batch, Is.SameAs(firstBatch)); - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 2")); + Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(2)); + } - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); - Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1, 9")); - Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); - } + await conn.CloseAsync(); + await conn.OpenAsync(); - [Test] - public async Task Log_ExecuteScalar_single_statement_with_parameter_logging_off() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var batch = new NpgsqlBatch(conn) + await using (var batch = conn.CreateBatch()) { - BatchCommands = - { - new("SELECT $1") { Parameters = { new() { Value = 8 } } }, - new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } - } - }; + Assert.That(batch, Is.SameAs(firstBatch)); - using (listLoggerProvider.Record()) - { - await batch.ExecuteScalarAsync(); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 3")); + Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(3)); } - - var executingCommandEvent = 
listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1, 9]")); - var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[1], Is.EqualTo("SELECT $1, 9")); } - #endregion Logging + #endregion Miscellaneous #region Initialization / setup / teardown @@ -834,11 +820,16 @@ public async Task Log_ExecuteScalar_single_statement_with_parameter_logging_off( readonly CommandBehavior Behavior; // ReSharper restore InconsistentNaming - public BatchTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) : base(multiplexingMode) + NpgsqlDataSource? _dataSource; + protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.IncludeFailedBatchedCommand = true); + + public BatchTests(CommandBehavior behavior) { Behavior = behavior; IsSequential = (Behavior & CommandBehavior.SequentialAccess) != 0; } + public void Dispose() => DataSource.Dispose(); + #endregion } diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index fac6e71c92..7c57ce7723 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -1,20 +1,23 @@ -using Npgsql.BackendMessages; +using Npgsql.BackendMessages; using Npgsql.Tests.Support; -using Npgsql.TypeMapping; using NpgsqlTypes; using NUnit.Framework; using System; using System.Data; +using System.Numerics; using System.Text; using System.Threading; using System.Threading.Tasks; using System.Transactions; +using Npgsql.Internal.Postgres; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; public class BugTests : TestBase { + static uint ByteaOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bytea).Value; + #region 
Sequential reader bugs [Test, Description("In sequential access, performing a null check on a non-first field would check the first field")] @@ -71,18 +74,6 @@ public void Many_parameters_with_mixed_FormatCode() .Or.EqualTo(PostgresErrorCodes.TooManyColumns)); // PostgreSQL 14.5, 13.8, 12.12, 11.17 and 10.22 changed the returned error } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1238")] - public void Record_with_non_int_field() - { - using var conn = OpenConnection(); - using var cmd = new NpgsqlCommand("SELECT ('one'::TEXT, 2)", conn); - using var reader = cmd.ExecuteReader(); - reader.Read(); - var record = reader.GetFieldValue(0); - Assert.That(record[0], Is.EqualTo("one")); - Assert.That(record[1], Is.EqualTo(2)); - } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1450")] public void Bug1450() { @@ -125,7 +116,8 @@ public async Task Bug3600() CommandTimeout = 1, }; await using var postmasterMock = PgPostmasterMock.Start(csb.ConnectionString); - await using var conn = await OpenConnectionAsync(postmasterMock.ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmasterMock.WaitForServerConnection(); await serverMock .WriteCopyInResponse() @@ -152,26 +144,25 @@ public async Task Bug1497() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1558")] public void Bug1558() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, - Enlist = true - }; + csb.Pooling = false; + csb.Enlist = true; + }); using var tx = new TransactionScope(); - using var conn = new NpgsqlConnection(csb.ToString()); - conn.Open(); + using var conn = dataSource.OpenConnection(); } [Test] public void Bug1695() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, 
- MaxAutoPrepare = 10, - AutoPrepareMinUsages = 1 - }; - using var conn = OpenConnection(csb); + csb.Pooling = false; + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 1; + }); + using var conn = dataSource.OpenConnection(); using (var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn)) using (var reader = cmd.ExecuteReader()) { @@ -184,8 +175,7 @@ public void Bug1695() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1700")] public void Bug1700() - { - Assert.That(() => + => Assert.That(() => { using var conn = OpenConnection(); using var tx = conn.BeginTransaction(); @@ -207,7 +197,6 @@ public void Bug1700() // Note, we never get here tx.Commit(); }, Throws.InvalidOperationException.With.Message.EqualTo("Some problem parsing the returned data")); - } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1964")] public void Bug1964() @@ -288,13 +277,13 @@ public async Task Bug2046() [Test] public void Bug1761() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Enlist = true, - Pooling = true, - MinPoolSize = 1, - MaxPoolSize = 1 - }.ConnectionString; + csb.Enlist = true; + csb.Pooling = true; + csb.MinPoolSize = 1; + csb.MaxPoolSize = 1; + }); for (var i = 0; i < 2; i++) { @@ -306,7 +295,7 @@ public void Bug1761() // Ambient transaction is now unusable, attempts to enlist to it will fail. We should recover // properly from this failure. 
- using (var connection = OpenConnection(connString)) + using (var connection = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT 1", connection)) { cmd.CommandText = "select 1;"; @@ -375,32 +364,31 @@ enum Bug2278EnumType [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2178")] - public void Bug2178() + public async Task Bug2178() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 2 - }; - using var conn = new NpgsqlConnection(builder.ConnectionString); - using var cmd = new NpgsqlCommand(); - conn.Open(); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 2; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand(); cmd.Connection = conn; cmd.CommandText = "SELECT 1"; - cmd.ExecuteScalar(); - cmd.ExecuteScalar(); + await cmd.ExecuteScalarAsync(); + await cmd.ExecuteScalarAsync(); Assert.That(cmd.IsPrepared); // Now executing a faulty command multiple times cmd.CommandText = "SELECT * FROM public.dummy_table_name"; for (var i = 0; i < 3; ++i) { - Assert.Throws(() => cmd.ExecuteScalar()); + Assert.ThrowsAsync(async () => await cmd.ExecuteScalarAsync()); } cmd.CommandText = "SELECT 1"; - cmd.ExecuteScalar(); + await cmd.ExecuteScalarAsync(); Assert.That(cmd.IsPrepared); } @@ -1100,11 +1088,9 @@ CREATE TEMP TABLE ""OrganisatieQmo_Organisatie_QueryModelObjects_Imp"" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] public async Task Chunked_string_write_buffer_encoding_space() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - // write buffer size must be 8192 for this test to work - // so guard against changes to the default / a change in the test harness - builder.WriteBufferSize = 8192; - await using var conn = await OpenConnectionAsync(builder.ConnectionString); + // write buffer size must be 8192 for this test to 
work so guard against changes to the default / a change in the test harness + await using var dataSource = CreateDataSource(csb => csb.WriteBufferSize = 8192); + await using var conn = await dataSource.OpenConnectionAsync(); var tableName = await CreateTempTable(conn, "col1 text, col2 text"); @@ -1128,11 +1114,9 @@ public async Task Chunked_string_write_buffer_encoding_space() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] public async Task Chunked_char_array_write_buffer_encoding_space() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - // write buffer size must be 8192 for this test to work - // so guard against changes to the default / a change in the test harness - builder.WriteBufferSize = 8192; - await using var conn = await OpenConnectionAsync(builder.ConnectionString); + // write buffer size must be 8192 for this test to work so guard against changes to the default / a change in the test harness + await using var dataSource = CreateDataSource(csb => csb.WriteBufferSize = 8192); + await using var conn = await dataSource.OpenConnectionAsync(); var tableName = await CreateTempTable(conn, "col1 text, col2 text"); @@ -1206,7 +1190,7 @@ LANGUAGE plpgsql AS END; $$;"); - Assert.ThrowsAsync(async () => await connection.ExecuteScalarAsync($"SELECT {func}(0)")); + Assert.ThrowsAsync(async () => await connection.ExecuteScalarAsync($"SELECT {func}(0)")); } [Test] @@ -1215,9 +1199,9 @@ public void Bug3117() { const string OkCommand = "SELECT 1"; const string ErrorCommand = "SELECT * FROM public.imnotexist"; - using (var conn = new NpgsqlConnection(ConnectionString)) + using var dataSource = CreateDataSource(); + using (var conn = dataSource.OpenConnection()) { - conn.Open(); var okCommand = new NpgsqlCommand(OkCommand, conn); okCommand.Prepare(); using (okCommand.ExecuteReader()) { } @@ -1228,13 +1212,11 @@ public void Bug3117() .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.UndefinedTable)); } - 
using (var conn = new NpgsqlConnection(ConnectionString)) + using (var conn = dataSource.OpenConnection()) { - conn.Open(); var okCommand = new NpgsqlCommand(OkCommand, conn); okCommand.Prepare(); using (okCommand.ExecuteReader()) { } - conn.UnprepareAll(); } } @@ -1280,20 +1262,20 @@ public async Task Bug3649() using (var exporter = await conn.BeginBinaryExportAsync($"COPY {table} (value) TO STDIN (FORMAT binary)")) { await exporter.StartRowAsync(); - Assert.IsTrue(exporter.IsNull); + Assert.That(exporter.IsNull); await exporter.SkipAsync(); await exporter.StartRowAsync(); - Assert.AreEqual(1, await exporter.ReadAsync()); + Assert.That(await exporter.ReadAsync(), Is.EqualTo(1)); await exporter.StartRowAsync(); - Assert.AreEqual(2, await exporter.ReadAsync()); + Assert.That(await exporter.ReadAsync(), Is.EqualTo(2)); } } [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/3839")] - public async Task SingleThreadedSynchronizationContext_deadlock() + public async Task UIThreadSynchronizationContext_deadlock() { - var syncContext = new SingleThreadSynchronizationContext(nameof(SingleThreadedSynchronizationContext_deadlock)); + var syncContext = new SingleThreadSynchronizationContext(nameof(UIThreadSynchronizationContext_deadlock)); using (var _ = syncContext.Enter()) { // We have to Yield, so the current thread is changed to the one used by SingleThreadSynchronizationContext @@ -1321,7 +1303,8 @@ public async Task Bug3924() }; await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); - await using var conn = await OpenConnectionAsync(postmaster.ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmaster.WaitForServerConnection(); using (var cmd = conn.CreateCommand()) @@ -1350,63 +1333,44 @@ public async Task Bug3924() } } - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4099")] - public async 
Task Bug4099() - { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Multiplexing = true, - MaxPoolSize = 1 - }; - await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); - await using var firstConn = await OpenConnectionAsync(postmaster.ConnectionString); - await using var secondConn = await OpenConnectionAsync(postmaster.ConnectionString); - - var byteArrayLength = csb.WriteBufferSize + 100; - var firstQuery = firstConn.ExecuteScalarAsync("SELECT data"); - - var server = await postmaster.WaitForServerConnection(); - await server.ExpectExtendedQuery(); - - var secondQuery = secondConn.ExecuteScalarAsync("SELECT other_data"); - await server.ExpectExtendedQuery(); - - var data = new byte[10000]; - await server - .WriteParseComplete() - .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) - .WriteDataRowWithFlush(data); - - var otherData = new byte[10]; - await server - .WriteCommandComplete() - .WriteReadyForQuery() - .WriteParseComplete() - .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) - .WriteDataRow(otherData) - .WriteCommandComplete() - .WriteReadyForQuery() - .FlushAsync(); - - Assert.That(data, Is.EquivalentTo((byte[])(await firstQuery)!)); - Assert.That(otherData, Is.EquivalentTo((byte[])(await secondQuery)!)); - } - [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/4123")] public async Task Bug4123() { - using var conn = OpenConnection(); - using var cmd = new NpgsqlCommand("SELECT 1", conn); - using var rdr = await cmd.ExecuteReaderAsync(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + await using var rdr = await cmd.ExecuteReaderAsync(); await rdr.ReadAsync(); - using var stream = await rdr.GetStreamAsync(0); + await using var stream = await rdr.GetStreamAsync(0); Assert.DoesNotThrowAsync(stream.FlushAsync); Assert.DoesNotThrow(stream.Flush); } + + [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/6389")] + public async Task Composite_with_BigInteger([Values(CommandBehavior.Default, CommandBehavior.SequentialAccess)] CommandBehavior behavior) + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} as (value numeric)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = $"SELECT ROW(1234567890::numeric)::{type} FROM generate_series(1, 8000)"; + await using var reader = await cmd.ExecuteReaderAsync(behavior); + while (await reader.ReadAsync()) + { + Assert.DoesNotThrowAsync(async () => await reader.GetFieldValueAsync(0)); + } + } + + class Composite_with_BigInteger_Composite + { + public BigInteger Value { get; set; } + } } diff --git a/test/Npgsql.Tests/CommandBuilderTests.cs b/test/Npgsql.Tests/CommandBuilderTests.cs index 90b146d344..2e6fc90d7c 100644 --- a/test/Npgsql.Tests/CommandBuilderTests.cs +++ b/test/Npgsql.Tests/CommandBuilderTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using Npgsql.PostgresTypes; @@ -80,9 +80,9 @@ public async Task DeriveParameters_text_prepared_statement() { const string query = "SELECT @p::integer"; const int answer = 42; - using var _ = CreateTempPool(ConnectionString, out var connString); - using var conn = await OpenConnectionAsync(connString); - using var cmd = new NpgsqlCommand(query, conn); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand(query, conn); cmd.Parameters.AddWithValue("@p", NpgsqlDbType.Integer, answer); cmd.Prepare(); 
Assert.That(conn.Connector!.PreparedStatementManager.NumPrepared, Is.EqualTo(1)); @@ -102,8 +102,6 @@ public async Task DeriveParameters_text_prepared_statement() Assert.That(conn.Connector.PreparedStatementManager.NumPrepared, Is.EqualTo(1)); cmd.Parameters["@p"].Value = answer; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(answer)); - - conn.UnprepareAll(); } [Test, Description("Tests parameter derivation for array parameters in parameterized queries (CommandType.Text)")] @@ -343,7 +341,7 @@ PRIMARY KEY (Cod) Assert.That(row[0], Is.EqualTo("key1")); Assert.That(row[1], Is.EqualTo("description")); - Assert.That(row[2], Is.EqualTo(new DateTime(2018, 7, 3))); + Assert.That(row[2], Is.EqualTo(new DateOnly(2018, 7, 3))); Assert.That(row[3], Is.EqualTo(new DateTime(2018, 7, 3, 7, 2, 0))); Assert.That(row[4], Is.EqualTo(123)); Assert.That(row[5], Is.EqualTo(123.4)); @@ -366,7 +364,7 @@ public async Task Get_update_command_with_column_aliases() using var cbCommandBuilder = new NpgsqlCommandBuilder(daDataAdapter); daDataAdapter.UpdateCommand = cbCommandBuilder.GetUpdateCommand(); - Assert.True(daDataAdapter.UpdateCommand.CommandText.Contains("SET \"cod\" = @p1, \"descr\" = @p2, \"data\" = @p3 WHERE ((\"cod\" = @p4) AND ((@p5 = 1 AND \"descr\" IS NULL) OR (\"descr\" = @p6)) AND ((@p7 = 1 AND \"data\" IS NULL) OR (\"data\" = @p8)))")); + Assert.That(daDataAdapter.UpdateCommand.CommandText.Contains("SET \"cod\" = @p1, \"descr\" = @p2, \"data\" = @p3 WHERE ((\"cod\" = @p4) AND ((@p5 = 1 AND \"descr\" IS NULL) OR (\"descr\" = @p6)) AND ((@p7 = 1 AND \"data\" IS NULL) OR (\"data\" = @p8)))")); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2846")] @@ -389,4 +387,60 @@ public async Task Get_update_command_with_array_column_type() daDataAdapter.Update(dtTable); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6240")] + public async Task Get_update_command_with_domain_column_type() + { + await using var adminConnection = await 
OpenConnectionAsync(); + var domainTypeName = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE DOMAIN {domainTypeName} AS smallint"); + + var tableName = await CreateTempTable(adminConnection, $"id serial PRIMARY KEY, domtest {domainTypeName}"); + + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + + using var adapter = new NpgsqlDataAdapter($"select * from {tableName}", conn); + + var builder = new NpgsqlCommandBuilder(adapter) + { + ConflictOption = ConflictOption.CompareAllSearchableValues, + SetAllValues = true + }; + + adapter.InsertCommand = builder.GetInsertCommand(); + adapter.UpdateCommand = builder.GetUpdateCommand(); + adapter.DeleteCommand = builder.GetDeleteCommand(); + + using var dataTable = new DataTable(); + + adapter.Fill(dataTable); + + const short sval = 5; + + var newRow = dataTable.NewRow(); + newRow[1] = sval; + dataTable.Rows.Add(newRow); + + adapter.Update(dataTable); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6240")] + public async Task Fill_datatable_with_array_column_type() + { + await using var connection = await OpenConnectionAsync(); + + var tableName = await CreateTempTable(connection, "id serial PRIMARY KEY, textarr text[] COLLATE pg_catalog.\"default\""); + + using var adapter = new NpgsqlDataAdapter($"select * from {tableName}", connection); + + using var dataTable = new DataTable(); + + adapter.FillSchema(dataTable, SchemaType.Source); + + adapter.MissingSchemaAction = MissingSchemaAction.Ignore; + + adapter.Fill(dataTable); + } } diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs new file mode 100644 index 0000000000..adc5d311a5 --- /dev/null +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -0,0 +1,207 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests; + 
+public class CommandParameterTests : TestBase +{ + [Test] + [TestCase(CommandBehavior.Default)] + [TestCase(CommandBehavior.SequentialAccess)] + public async Task Input_and_output_parameters(CommandBehavior behavior) + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @c-1 AS c, @a+2 AS b", conn); + cmd.Parameters.Add(new NpgsqlParameter("a", 3)); + var b = new NpgsqlParameter { ParameterName = "b", Direction = ParameterDirection.Output }; + cmd.Parameters.Add(b); + var c = new NpgsqlParameter { ParameterName = "c", Direction = ParameterDirection.InputOutput, Value = 4 }; + cmd.Parameters.Add(c); + using (await cmd.ExecuteReaderAsync(behavior)) + { + Assert.That(b.Value, Is.EqualTo(5)); + Assert.That(c.Value, Is.EqualTo(3)); + } + } + + [Test] + public async Task Send_NpgsqlDbType_Unknown([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p::TIMESTAMP", conn); + cmd.CommandText = "SELECT @p::TIMESTAMP"; + cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Unknown) { Value = "2008-1-1" }); + if (prepare == PrepareOrNot.Prepared) + cmd.Prepare(); + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetValue(0), Is.EqualTo(new DateTime(2008, 1, 1))); + } + + [Test] + public async Task Positional_parameter() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); + } + + [Test] + public async Task Positional_parameters_are_not_supported_with_legacy_batching() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1; SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType 
= NpgsqlDbType.Integer, Value = 8 }); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf() + .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.SyntaxError)); + } + + [Test] + public async Task Unreferenced_named_parameter_works() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + cmd.Parameters.AddWithValue("not_used", 8); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); + } + + [Test] + public async Task Unreferenced_positional_parameter_works() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); + } + + [Test] + public async Task Mixing_positional_and_named_parameters_is_not_supported() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = 9 }); + Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); + } + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] + public async Task Reuse_command_with_different_parameter_placeholder_types() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + cmd.CommandText = "SELECT @p1"; + cmd.Parameters.AddWithValue("@p1", 8); + _ = await cmd.ExecuteScalarAsync(); + + cmd.CommandText = "SELECT $1"; + cmd.Parameters[0].ParameterName = null; + _ = await cmd.ExecuteScalarAsync(); + } + + [Test] + public async Task Positional_output_parameters_are_not_supported() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { 
Value = 8, Direction = ParameterDirection.InputOutput }); + Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); + } + + [Test] + public void Parameters_get_name() + { + var command = new NpgsqlCommand(); + + // Add parameters. + command.Parameters.Add(new NpgsqlParameter(":Parameter1", DbType.Boolean)); + command.Parameters.Add(new NpgsqlParameter(":Parameter2", DbType.Int32)); + command.Parameters.Add(new NpgsqlParameter(":Parameter3", DbType.DateTime)); + command.Parameters.Add(new NpgsqlParameter("Parameter4", DbType.DateTime)); + + var idbPrmtr = command.Parameters["Parameter1"]; + Assert.That(idbPrmtr, Is.Not.Null); + command.Parameters[0].Value = 1; + + // Get by indexers. + + Assert.That(command.Parameters["Parameter1"].ParameterName, Is.EqualTo(":Parameter1")); + Assert.That(command.Parameters["Parameter2"].ParameterName, Is.EqualTo(":Parameter2")); + Assert.That(command.Parameters["Parameter3"].ParameterName, Is.EqualTo(":Parameter3")); + Assert.That(command.Parameters["Parameter4"].ParameterName, Is.EqualTo("Parameter4")); //Should this work? 
+ + Assert.That(command.Parameters[0].ParameterName, Is.EqualTo(":Parameter1")); + Assert.That(command.Parameters[1].ParameterName, Is.EqualTo(":Parameter2")); + Assert.That(command.Parameters[2].ParameterName, Is.EqualTo(":Parameter3")); + Assert.That(command.Parameters[3].ParameterName, Is.EqualTo("Parameter4")); + } + + [Test] + public async Task Same_param_multiple_times() + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p1, @p1", conn); + cmd.Parameters.AddWithValue("@p1", 8); + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader[0], Is.EqualTo(8)); + Assert.That(reader[1], Is.EqualTo(8)); + } + + [Test] + public async Task Generic_parameter() + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3, @p4", conn); + cmd.Parameters.Add(new NpgsqlParameter("p1", 8)); + cmd.Parameters.Add(new NpgsqlParameter("p2", 8) { NpgsqlDbType = NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter("p3", "hello")); + cmd.Parameters.Add(new NpgsqlParameter("p4", ['f', 'o', 'o'])); + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetInt32(0), Is.EqualTo(8)); + Assert.That(reader.GetInt32(1), Is.EqualTo(8)); + Assert.That(reader.GetString(2), Is.EqualTo("hello")); + Assert.That(reader.GetString(3), Is.EqualTo("foo")); + } + + [Test] + [TestCase(false)] + [TestCase(true)] + public async Task Parameter_must_be_set(bool genericParam) + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1::TEXT", conn); + cmd.Parameters.Add( + genericParam + ? 
new NpgsqlParameter("p1", null) + : new NpgsqlParameter("p1", null) + ); + + Assert.That(async () => await cmd.ExecuteReaderAsync(), + Throws.Exception + .TypeOf() + .With.Message.EqualTo("Parameter 'p1' must have either its DbType, NpgsqlDbType, DataTypeName or its Value set.")); + } + + [Test] + public async Task Object_generic_param_does_runtime_lookup() + { + await AssertTypeWrite(1, "1", "integer", dbType: DbType.Int32, skipArrayCheck: true); + await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", skipArrayCheck: true); + } + + [Test] + public async Task Object_generic_parameter_works() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); + } +} diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 4f24eb5e18..7c6888c9a7 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1,7 +1,6 @@ using Npgsql.BackendMessages; using Npgsql.Internal; using Npgsql.Tests.Support; -using Npgsql.TypeMapping; using NpgsqlTypes; using NUnit.Framework; using System; @@ -11,12 +10,16 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Npgsql.Internal.Postgres; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; -public class CommandTests : MultiplexingTestBase +public class CommandTests : TestBase { + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + static uint TextOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Text).Value; + #region Legacy batching [Test] @@ -38,7 +41,7 @@ public async Task Multiple_statements(bool[] queries) { await using var cmd = conn.CreateCommand(); cmd.CommandText = sql; - if (prepare && !IsMultiplexing) + if (prepare) await 
cmd.PrepareAsync(); await using var reader = await cmd.ExecuteReaderAsync(); var numResultSets = queries.Count(q => q); @@ -54,9 +57,6 @@ public async Task Multiple_statements(bool[] queries) [Test] public async Task Multiple_statements_with_parameters([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT @p1; SELECT @p2"; @@ -80,9 +80,6 @@ public async Task Multiple_statements_with_parameters([Values(PrepareOrNot.NotPr [Test] public async Task SingleRow_legacy_batching([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); if (prepare == PrepareOrNot.Prepared) @@ -134,6 +131,30 @@ public async Task Legacy_batching_is_not_supported_when_EnableSqlParsing_is_disa .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.SyntaxError)); } + [Test] + [NonParallelizable] // Disables sql rewriting + public async Task Positional_parameters_are_supported_when_EnableSqlParsing_is_disabled() + { + using var _ = DisableSqlRewriting(); + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); + } + + [Test] + [NonParallelizable] // Disables sql rewriting + public async Task Named_parameters_are_not_supported_when_EnableSqlParsing_is_disabled() + { + using var _ = DisableSqlRewriting(); + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter("p", 8)); 
+ Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); + } + #endregion #region Timeout @@ -142,13 +163,9 @@ public async Task Legacy_batching_is_not_supported_when_EnableSqlParsing_is_disa [IssueLink("https://github.com/npgsql/npgsql/issues/327")] public async Task Timeout() { - if (IsMultiplexing) - return; // Multiplexing, Timeout - - // Mono throws a socket exception with WouldBlock instead of TimedOut (see #1330) - var isMono = Type.GetType("Mono.Runtime") != null; - using var conn = await OpenConnectionAsync(ConnectionString + ";CommandTimeout=1"); - using var cmd = CreateSleepCommand(conn, 10); + await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = CreateSleepCommand(conn, 10); Assert.That(() => cmd.ExecuteNonQuery(), Throws.Exception .TypeOf() .With.InnerException.TypeOf() @@ -160,11 +177,9 @@ public async Task Timeout() [IssueLink("https://github.com/npgsql/npgsql/issues/607")] public async Task Timeout_async_soft() { - if (IsMultiplexing) - return; // Multiplexing, Timeout - - using var conn = await OpenConnectionAsync(builder => builder.CommandTimeout = 1); - using var cmd = CreateSleepCommand(conn, 10); + await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = CreateSleepCommand(conn, 10); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception .TypeOf() @@ -176,13 +191,10 @@ public async Task Timeout_async_soft() [IssueLink("https://github.com/npgsql/npgsql/issues/607")] public async Task Timeout_async_hard() { - if (IsMultiplexing) - return; // Multiplexing, Timeout - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { CommandTimeout = 1 }; await using var postmasterMock = PgPostmasterMock.Start(builder.ConnectionString); - using var _ = 
CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; @@ -202,13 +214,9 @@ public async Task Timeout_from_connection_string() { Assert.That(NpgsqlConnector.MinimumInternalCommandTimeout, Is.Not.EqualTo(NpgsqlCommand.DefaultTimeout)); var timeout = NpgsqlConnector.MinimumInternalCommandTimeout; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - CommandTimeout = timeout - }.ToString(); - using var conn = new NpgsqlConnection(connString); - var command = new NpgsqlCommand("SELECT 1", conn); - conn.Open(); + await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = timeout); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var command = new NpgsqlCommand("SELECT 1", conn); Assert.That(command.CommandTimeout, Is.EqualTo(timeout)); command.CommandTimeout = 10; await command.ExecuteScalarAsync(); @@ -218,42 +226,35 @@ public async Task Timeout_from_connection_string() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/395")] public async Task Timeout_switch_connection() { - using (var conn = new NpgsqlConnection(ConnectionString)) + var csb = new NpgsqlConnectionStringBuilder(ConnectionString); + if (csb.CommandTimeout is >= 100 and < 105) + IgnoreExceptOnBuildServer("Bad default command timeout"); + + await using var dataSource1 = CreateDataSource(ConnectionString + ";CommandTimeout=100"); + await using var c1 = dataSource1.CreateConnection(); + await using var cmd = c1.CreateCommand(); + Assert.That(cmd.CommandTimeout, Is.EqualTo(100)); + await using var dataSource2 = CreateDataSource(ConnectionString + ";CommandTimeout=101"); + await using (var c2 = 
dataSource2.CreateConnection()) { - if (conn.CommandTimeout >= 100 && conn.CommandTimeout < 105) - TestUtil.IgnoreExceptOnBuildServer("Bad default command timeout"); + cmd.Connection = c2; + Assert.That(cmd.CommandTimeout, Is.EqualTo(101)); } - - using (var c1 = await OpenConnectionAsync(ConnectionString + ";CommandTimeout=100")) + cmd.CommandTimeout = 102; + await using (var c2 = dataSource2.CreateConnection()) { - using (var cmd = c1.CreateCommand()) - { - Assert.That(cmd.CommandTimeout, Is.EqualTo(100)); - using (var c2 = new NpgsqlConnection(ConnectionString + ";CommandTimeout=101")) - { - cmd.Connection = c2; - Assert.That(cmd.CommandTimeout, Is.EqualTo(101)); - } - cmd.CommandTimeout = 102; - using (var c2 = new NpgsqlConnection(ConnectionString + ";CommandTimeout=101")) - { - cmd.Connection = c2; - Assert.That(cmd.CommandTimeout, Is.EqualTo(102)); - } - } + cmd.Connection = c2; + Assert.That(cmd.CommandTimeout, Is.EqualTo(102)); } } [Test] public async Task Prepare_timeout_hard([Values] SyncOrAsync async) { - if (IsMultiplexing) - return; // Multiplexing, Timeout - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { CommandTimeout = 1 }; await using var postmasterMock = PgPostmasterMock.Start(builder.ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; @@ -282,11 +283,8 @@ public async Task Prepare_timeout_hard([Values] SyncOrAsync async) [Test, Description("Basic cancellation scenario")] public async Task Cancel() { - if (IsMultiplexing) - return; - - using var conn = await OpenConnectionAsync(); - using var cmd = CreateSleepCommand(conn, 5); + await using var conn = await 
OpenConnectionAsync(); + await using var cmd = CreateSleepCommand(conn, 5); var queryTask = Task.Run(() => cmd.ExecuteNonQuery()); // We have to be sure the command's state is InProgress, otherwise the cancellation request will never be sent @@ -302,11 +300,8 @@ public async Task Cancel() [Test] public async Task Cancel_async_immediately() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var conn = await OpenConnectionAsync(); - using var cmd = conn.CreateCommand(); + await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT 1"; var t = cmd.ExecuteScalarAsync(new(canceled: true)); @@ -321,11 +316,8 @@ public async Task Cancel_async_immediately() [Test, Description("Cancels an async query with the cancellation token, with successful PG cancellation")] public async Task Cancel_async_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var conn = await OpenConnectionAsync(); - using var cmd = CreateSleepCommand(conn); + await using var cmd = CreateSleepCommand(conn); using var cancellationSource = new CancellationTokenSource(); var t = cmd.ExecuteNonQueryAsync(cancellationSource.Token); cancellationSource.Cancel(); @@ -339,15 +331,51 @@ public async Task Cancel_async_soft() Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } + [Test, Description("Cancels an async query with the cancellation token and prepended query, with successful PG cancellation")] + [IssueLink("https://github.com/npgsql/npgsql/issues/5191")] + public async Task Cancel_async_soft_with_prepended_query() + { + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + var server = await postmasterMock.WaitForServerConnection(); + + var processId = conn.ProcessID; + + await using var tx = await conn.BeginTransactionAsync(); + await using var cmd = 
CreateSleepCommand(conn); + using var cancellationSource = new CancellationTokenSource(); + var t = cmd.ExecuteNonQueryAsync(cancellationSource.Token); + + await server.ExpectSimpleQuery("BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED"); + cancellationSource.Cancel(); + await server + .WriteCommandComplete() + .WriteReadyForQuery(TransactionStatus.InTransactionBlock) + .FlushAsync(); + + Assert.That((await postmasterMock.WaitForCancellationRequest()).ProcessId, + Is.EqualTo(processId)); + + await server + .WriteErrorResponse(PostgresErrorCodes.QueryCanceled) + .WriteReadyForQuery() + .FlushAsync(); + + var exception = Assert.ThrowsAsync(async () => await t)!; + Assert.That(exception.InnerException, + Is.TypeOf().With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.QueryCanceled)); + Assert.That(exception.CancellationToken, Is.EqualTo(cancellationSource.Token)); + + Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); + } + [Test, Description("Cancels an async query with the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task Cancel_async_hard() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; @@ -370,22 +398,19 @@ public async Task Cancel_async_hard() [Ignore("https://github.com/npgsql/npgsql/issues/4668")] public async Task Bug3466([Values(false, true)] bool isBroken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { Pooling = false }; await 
using var postmasterMock = PgPostmasterMock.Start(csb.ToString(), completeCancellationImmediately: false); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; using var cancellationSource = new CancellationTokenSource(); - using var cmd = new NpgsqlCommand("SELECT 1", conn) + await using var cmd = new NpgsqlCommand("SELECT 1", conn) { CommandTimeout = 4 }; @@ -408,7 +433,7 @@ public async Task Bug3466([Values(false, true)] bool isBroken) await serverMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .WriteCommandComplete() .WriteReadyForQuery() @@ -427,9 +452,9 @@ await serverMock [Explicit("Timing-sensitive")] public async Task Cancel_cross_command() { - using var conn = await OpenConnectionAsync(); - using var cmd1 = CreateSleepCommand(conn, 2); - using var cmd2 = new NpgsqlCommand("SELECT 1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd1 = CreateSleepCommand(conn, 2); + await using var cmd2 = new NpgsqlCommand("SELECT 1", conn); var cancelTask = Task.Factory.StartNew(() => { Thread.Sleep(300); @@ -461,7 +486,7 @@ public async Task Cursor_statement() while (dr.Read()) i++; - Assert.AreEqual(3, i); + Assert.That(i, Is.EqualTo(3)); dr.Close(); i = 0; @@ -469,7 +494,7 @@ public async Task Cursor_statement() var dr2 = command.ExecuteReader(); while (dr2.Read()) i++; - Assert.AreEqual(1, i); + Assert.That(i, Is.EqualTo(1)); dr2.Close(); command.CommandText = "close te;"; @@ -485,7 +510,7 @@ 
public async Task Cursor_move_RecordsAffected() command.ExecuteNonQuery(); command.CommandText = "MOVE FORWARD ALL IN curs"; var count = command.ExecuteNonQuery(); - Assert.AreEqual(3, count); + Assert.That(count, Is.EqualTo(3)); } #endregion @@ -528,217 +553,23 @@ public async Task CloseConnection_with_exception() [Test] public async Task SingleRow([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT 1, 2 UNION SELECT 3, 4", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1, 2 UNION SELECT 3, 4", conn); if (prepare == PrepareOrNot.Prepared) cmd.Prepare(); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SingleRow); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SingleRow); Assert.That(() => reader.GetInt32(0), Throws.Exception.TypeOf()); Assert.That(reader.Read(), Is.True); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); Assert.That(reader.Read(), Is.False); } - #region Parameters - - [Test] - public async Task Positional_parameter() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); - } - - [Test] - public async Task Positional_parameters_are_not_supported_with_legacy_batching() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1; SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf() - 
.With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.SyntaxError)); - } - - [Test] - [NonParallelizable] // Disables sql rewriting - public async Task Positional_parameters_are_supported_when_EnableSqlParsing_is_disabled() - { - using var _ = DisableSqlRewriting(); - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); - } - - [Test] - [NonParallelizable] // Disables sql rewriting - public async Task Named_parameters_are_not_supported_when_EnableSqlParsing_is_disabled() - { - using var _ = DisableSqlRewriting(); - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("p", 8)); - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); - } - - [Test, Description("Makes sure writing an unset parameter isn't allowed")] - public async Task Parameter_without_Value() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("@p", NpgsqlDbType.Integer)); - Assert.That(() => cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); - } - - [Test] - public async Task Unreferenced_named_parameter_works() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - cmd.Parameters.AddWithValue("not_used", 8); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); - } - - [Test] - public async Task Unreferenced_positional_parameter_works() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); - } - - 
[Test] - public async Task Mixing_positional_and_named_parameters_is_not_supported() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1, @p", conn); - cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = 9 }); - Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] - public async Task Cached_command_clears_parameters_placeholder_type() - { - await using var conn = await OpenConnectionAsync(); - - await using (var cmd1 = conn.CreateCommand()) - { - cmd1.CommandText = "SELECT @p1"; - cmd1.Parameters.AddWithValue("@p1", 8); - await using var reader1 = await cmd1.ExecuteReaderAsync(); - reader1.Read(); - Assert.That(reader1[0], Is.EqualTo(8)); - } - - await using (var cmd2 = conn.CreateCommand()) - { - cmd2.CommandText = "SELECT $1"; - cmd2.Parameters.AddWithValue(8); - await using var reader2 = await cmd2.ExecuteReaderAsync(); - reader2.Read(); - Assert.That(reader2[0], Is.EqualTo(8)); - } - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] - public async Task Reuse_command_with_different_parameter_placeholder_types() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = conn.CreateCommand(); - - cmd.CommandText = "SELECT @p1"; - cmd.Parameters.AddWithValue("@p1", 8); - _ = await cmd.ExecuteScalarAsync(); - - cmd.CommandText = "SELECT $1"; - cmd.Parameters[0].ParameterName = null; - _ = await cmd.ExecuteScalarAsync(); - } - - [Test] - public async Task Positional_output_parameters_are_not_supported() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { Value = 8, Direction = ParameterDirection.InputOutput }); - Assert.That(() => cmd.ExecuteNonQueryAsync(), 
Throws.Exception.TypeOf()); - } - - [Test] - public void Parameters_get_name() - { - var command = new NpgsqlCommand(); - - // Add parameters. - command.Parameters.Add(new NpgsqlParameter(":Parameter1", DbType.Boolean)); - command.Parameters.Add(new NpgsqlParameter(":Parameter2", DbType.Int32)); - command.Parameters.Add(new NpgsqlParameter(":Parameter3", DbType.DateTime)); - command.Parameters.Add(new NpgsqlParameter("Parameter4", DbType.DateTime)); - - var idbPrmtr = command.Parameters["Parameter1"]; - Assert.IsNotNull(idbPrmtr); - command.Parameters[0].Value = 1; - - // Get by indexers. - - Assert.AreEqual(":Parameter1", command.Parameters["Parameter1"].ParameterName); - Assert.AreEqual(":Parameter2", command.Parameters["Parameter2"].ParameterName); - Assert.AreEqual(":Parameter3", command.Parameters["Parameter3"].ParameterName); - Assert.AreEqual("Parameter4", command.Parameters["Parameter4"].ParameterName); //Should this work? - - Assert.AreEqual(":Parameter1", command.Parameters[0].ParameterName); - Assert.AreEqual(":Parameter2", command.Parameters[1].ParameterName); - Assert.AreEqual(":Parameter3", command.Parameters[2].ParameterName); - Assert.AreEqual("Parameter4", command.Parameters[3].ParameterName); - } - - [Test] - public async Task Same_param_multiple_times() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p1", conn); - cmd.Parameters.AddWithValue("@p1", 8); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader[0], Is.EqualTo(8)); - Assert.That(reader[1], Is.EqualTo(8)); - } - - [Test] - public async Task Generic_parameter() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3, @p4", conn); - cmd.Parameters.Add(new NpgsqlParameter("p1", 8)); - cmd.Parameters.Add(new NpgsqlParameter("p2", 8) { NpgsqlDbType = NpgsqlDbType.Integer }); - cmd.Parameters.Add(new NpgsqlParameter("p3", "hello")); - 
cmd.Parameters.Add(new NpgsqlParameter("p4", new[] { 'f', 'o', 'o' })); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetInt32(0), Is.EqualTo(8)); - Assert.That(reader.GetInt32(1), Is.EqualTo(8)); - Assert.That(reader.GetString(2), Is.EqualTo("hello")); - Assert.That(reader.GetString(3), Is.EqualTo("foo")); - } - - #endregion Parameters - [Test] public async Task CommandText_not_set() { - using var conn = await OpenConnectionAsync(); - using (var cmd = new NpgsqlCommand()) + await using var conn = await OpenConnectionAsync(); + await using (var cmd = new NpgsqlCommand()) { cmd.Connection = conn; Assert.That(cmd.ExecuteNonQueryAsync, Throws.Exception.TypeOf()); @@ -747,16 +578,16 @@ public async Task CommandText_not_set() cmd.CommandText = ""; } - using (var cmd = conn.CreateCommand()) + await using (var cmd = conn.CreateCommand()) Assert.That(cmd.ExecuteNonQueryAsync, Throws.Exception.TypeOf()); } [Test] public async Task ExecuteScalar() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); - using var command = new NpgsqlCommand($"SELECT name FROM {table}", conn); + await using var command = new NpgsqlCommand($"SELECT name FROM {table}", conn); Assert.That(command.ExecuteScalarAsync, Is.Null); await conn.ExecuteNonQueryAsync($"INSERT INTO {table} (name) VALUES (NULL)"); @@ -771,8 +602,8 @@ public async Task ExecuteScalar() [Test] public async Task ExecuteNonQuery() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand { Connection = conn }; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand { Connection = conn }; var table = await CreateTempTable(conn, "name TEXT"); cmd.CommandText = $"INSERT INTO {table} (name) VALUES ('John')"; @@ -788,7 +619,7 @@ public async Task ExecuteNonQuery() [Test, Description("Makes sure a command is unusable after 
it is disposed")] public async Task Dispose() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var cmd = new NpgsqlCommand("SELECT 1", conn); cmd.Dispose(); Assert.That(() => cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); @@ -800,7 +631,7 @@ public async Task Dispose() [Test, Description("Disposing a command with an open reader does not close the reader. This is the SqlClient behavior.")] public async Task Command_Dispose_does_not_close_reader() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var cmd = new NpgsqlCommand("SELECT 1, 2", conn); await cmd.ExecuteReaderAsync(); cmd.Dispose(); @@ -811,33 +642,50 @@ public async Task Command_Dispose_does_not_close_reader() [Test] public async Task Non_standards_conforming_strings() { - using var _ = CreateTempPool(ConnectionString, out var connString); - await using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); - if (IsMultiplexing) - { - Assert.That(() => conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"), - Throws.Exception.TypeOf()); - } - else - { - await conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"); - Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); - await conn.ExecuteNonQueryAsync("set standard_conforming_strings=on"); - } + await conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + await conn.ExecuteNonQueryAsync("set standard_conforming_strings=on"); } [Test] public async Task Parameter_and_operator_unclear() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); //Without parenthesis the meaning of [, . and potentially other characters is //a syntax error. 
See comment in NpgsqlCommand.GetClearCommandText() on "usually-redundant parenthesis". - using var command = new NpgsqlCommand("select :arr[2]", conn); + await using var command = new NpgsqlCommand("select :arr[2]", conn); command.Parameters.AddWithValue(":arr", new int[] {5, 4, 3, 2, 1}); - using var rdr = await command.ExecuteReaderAsync(); + await using var rdr = await command.ExecuteReaderAsync(); rdr.Read(); - Assert.AreEqual(rdr.GetInt32(0), 4); + Assert.That(rdr.GetInt32(0), Is.EqualTo(4)); + } + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] + public async Task Cached_command_clears_parameters_placeholder_type() + { + await using var conn = await OpenConnectionAsync(); + + await using (var cmd1 = conn.CreateCommand()) + { + cmd1.CommandText = "SELECT @p1"; + cmd1.Parameters.AddWithValue("@p1", 8); + await using var reader1 = await cmd1.ExecuteReaderAsync(); + reader1.Read(); + Assert.That(reader1[0], Is.EqualTo(8)); + } + + await using (var cmd2 = conn.CreateCommand()) + { + cmd2.CommandText = "SELECT $1"; + cmd2.Parameters.AddWithValue(8); + await using var reader2 = await cmd2.ExecuteReaderAsync(); + reader2.Read(); + Assert.That(reader2[0], Is.EqualTo(8)); + } } [Test] @@ -845,15 +693,15 @@ public async Task Parameter_and_operator_unclear() [TestCase(CommandBehavior.SequentialAccess)] public async Task Statement_mapped_output_parameters(CommandBehavior behavior) { - using var conn = await OpenConnectionAsync(); - var command = new NpgsqlCommand("select 3, 4 as param1, 5 as param2, 6;", conn); + await using var conn = await OpenConnectionAsync(); + var command = new NpgsqlCommand("select 3 as unknown, 4 as param1, 5 as param2, 6;", conn); - var p = new NpgsqlParameter("param2", NpgsqlDbType.Integer); + var p = new NpgsqlParameter("param1", NpgsqlDbType.Integer); p.Direction = ParameterDirection.Output; p.Value = -1; command.Parameters.Add(p); - p = new NpgsqlParameter("param1", NpgsqlDbType.Integer); + p = new 
NpgsqlParameter("param2", NpgsqlDbType.Integer); p.Direction = ParameterDirection.Output; p.Value = -1; command.Parameters.Add(p); @@ -863,17 +711,57 @@ public async Task Statement_mapped_output_parameters(CommandBehavior behavior) p.Value = -1; command.Parameters.Add(p); - using var reader = await command.ExecuteReaderAsync(behavior); + await using var reader = await command.ExecuteReaderAsync(behavior); - Assert.AreEqual(4, command.Parameters["param1"].Value); - Assert.AreEqual(5, command.Parameters["param2"].Value); + Assert.That(command.Parameters["p"].Value, Is.EqualTo(3)); + Assert.That(command.Parameters["param1"].Value, Is.EqualTo(4)); + Assert.That(command.Parameters["param2"].Value, Is.EqualTo(5)); reader.Read(); - Assert.AreEqual(3, reader.GetInt32(0)); - Assert.AreEqual(4, reader.GetInt32(1)); - Assert.AreEqual(5, reader.GetInt32(2)); - Assert.AreEqual(6, reader.GetInt32(3)); + Assert.That(reader.GetInt32(0), Is.EqualTo(3)); + Assert.That(reader.GetInt32(1), Is.EqualTo(4)); + Assert.That(reader.GetInt32(2), Is.EqualTo(5)); + Assert.That(reader.GetInt32(3), Is.EqualTo(6)); + } + + + [Test] + [TestCase(CommandBehavior.Default)] + [TestCase(CommandBehavior.SequentialAccess)] + public async Task Statement_mapped_generic_output_parameters(CommandBehavior behavior) + { + await using var conn = await OpenConnectionAsync(); + var command = new NpgsqlCommand("select '' as unknown, 4 as param1, 5 as param2, 6;", conn); + + var p = new NpgsqlParameter("param1", NpgsqlDbType.Integer); + p.Direction = ParameterDirection.Output; + p.Value = -1; + command.Parameters.Add(p); + + p = new NpgsqlParameter("param2", NpgsqlDbType.Integer); + p.Direction = ParameterDirection.Output; + p.Value = -1; + command.Parameters.Add(p); + + // char[] is one alternative mapping for text. 
+ var textP = new NpgsqlParameter("p", NpgsqlDbType.Text); + textP.Direction = ParameterDirection.Output; + textP.Value = "text".ToCharArray(); + command.Parameters.Add(textP); + + await using var reader = await command.ExecuteReaderAsync(behavior); + + Assert.That(command.Parameters["p"].Value, Is.EquivalentTo(Array.Empty())); + Assert.That(command.Parameters["param1"].Value, Is.EqualTo(4)); + Assert.That(command.Parameters["param2"].Value, Is.EqualTo(5)); + + reader.Read(); + + Assert.That(reader.GetFieldValue(0), Is.EquivalentTo(Array.Empty())); + Assert.That(reader.GetInt32(1), Is.EqualTo(4)); + Assert.That(reader.GetInt32(2), Is.EqualTo(5)); + Assert.That(reader.GetInt32(3), Is.EqualTo(6)); } [Test] @@ -904,30 +792,27 @@ public async Task Bug1006158_output_parameters() _ = await command.ExecuteScalarAsync(); - Assert.AreEqual(3, command.Parameters[0].Value); - Assert.AreEqual(true, command.Parameters[1].Value); + Assert.That(command.Parameters[0].Value, Is.EqualTo(3)); + Assert.That(command.Parameters[1].Value, Is.EqualTo(true)); } [Test] public async Task Bug1010788_UpdateRowSource() { - if (IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id SERIAL PRIMARY KEY, name TEXT"); var command = new NpgsqlCommand($"SELECT * FROM {table}", conn); - Assert.AreEqual(UpdateRowSource.Both, command.UpdatedRowSource); + Assert.That(command.UpdatedRowSource, Is.EqualTo(UpdateRowSource.Both)); var cmdBuilder = new NpgsqlCommandBuilder(); var da = new NpgsqlDataAdapter(command); cmdBuilder.DataAdapter = da; - Assert.IsNotNull(da.SelectCommand); - Assert.IsNotNull(cmdBuilder.DataAdapter); + Assert.That(da.SelectCommand, Is.Not.Null); + Assert.That(cmdBuilder.DataAdapter, Is.Not.Null); var updateCommand = cmdBuilder.GetUpdateCommand(); - Assert.AreEqual(UpdateRowSource.None, updateCommand.UpdatedRowSource); + Assert.That(updateCommand.UpdatedRowSource, Is.EqualTo(UpdateRowSource.None)); } [Test] @@ -943,57 
+828,19 @@ public async Task TableDirect() Assert.That(rdr["name"], Is.EqualTo("foo")); } - [Test] - [TestCase(CommandBehavior.Default)] - [TestCase(CommandBehavior.SequentialAccess)] - public async Task Input_and_output_parameters(CommandBehavior behavior) - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @c-1 AS c, @a+2 AS b", conn); - cmd.Parameters.Add(new NpgsqlParameter("a", 3)); - var b = new NpgsqlParameter { ParameterName = "b", Direction = ParameterDirection.Output }; - cmd.Parameters.Add(b); - var c = new NpgsqlParameter { ParameterName = "c", Direction = ParameterDirection.InputOutput, Value = 4 }; - cmd.Parameters.Add(c); - using (await cmd.ExecuteReaderAsync(behavior)) - { - Assert.AreEqual(5, b.Value); - Assert.AreEqual(3, c.Value); - } - } - - [Test] - public async Task Send_NpgsqlDbType_Unknown([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) - { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p::TIMESTAMP", conn); - cmd.CommandText = "SELECT @p::TIMESTAMP"; - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Unknown) { Value = "2008-1-1" }); - if (prepare == PrepareOrNot.Prepared) - cmd.Prepare(); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(new DateTime(2008, 1, 1))); - } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/503")] public async Task Invalid_UTF8() { const string badString = "SELECT 'abc\uD801\uD802d'"; - using var _ = CreateTempPool(ConnectionString, out var connString); - using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + using var conn = await dataSource.OpenConnectionAsync(); Assert.That(() => conn.ExecuteScalarAsync(badString), Throws.Exception.TypeOf()); } [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/395")] public async Task Use_across_connection_change([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn1 = await OpenConnectionAsync(); using var conn2 = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); @@ -1006,6 +853,202 @@ public async Task Use_across_connection_change([Values(PrepareOrNot.Prepared, Pr Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); } + // The asserts we're testing are debug only. + [Test] + public async Task Use_after_reload_types_invalidates_cached_infos() + { + using var conn1 = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT 1", conn1); + cmd.Prepare(); + using (var reader = await cmd.ExecuteReaderAsync()) + { + await reader.ReadAsync(); + Assert.DoesNotThrow(() => reader.GetInt32(0)); + } + + await conn1.ReloadTypesAsync(); + + using (var reader = await cmd.ExecuteReaderAsync()) + { + await reader.ReadAsync(); + Assert.DoesNotThrow(() => reader.GetInt32(0)); + } + } + + [Test] + public async Task Parameter_overflow_message_length_throws() + { + // Create a separate data source to avoid breaking unrelated queries + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @a, @b, @c, @d, @e, @f, @g, @h", conn); + + var largeParam = new string('A', 1 << 29); + cmd.Parameters.AddWithValue("a", largeParam); + cmd.Parameters.AddWithValue("b", largeParam); + cmd.Parameters.AddWithValue("c", largeParam); + cmd.Parameters.AddWithValue("d", largeParam); + cmd.Parameters.AddWithValue("e", largeParam); + cmd.Parameters.AddWithValue("f", largeParam); + cmd.Parameters.AddWithValue("g", largeParam); + cmd.Parameters.AddWithValue("h", largeParam); + + Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync()); + } + + [Test] 
+ public async Task Composite_overflow_message_length_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync( + $"CREATE TYPE {type} AS (a text, b text, c text, d text, e text, f text, g text, h text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var largeString = new string('A', 1 << 29); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + cmd.Parameters.AddWithValue("a", new BigComposite + { + A = largeString, + B = largeString, + C = largeString, + D = largeString, + E = largeString, + F = largeString, + G = largeString, + H = largeString + }); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + + record BigComposite + { + public string A { get; set; } = null!; + public string B { get; set; } = null!; + public string C { get; set; } = null!; + public string D { get; set; } = null!; + public string E { get; set; } = null!; + public string F { get; set; } = null!; + public string G { get; set; } = null!; + public string H { get; set; } = null!; + } + + [Test] + public async Task Array_overflow_message_length_throws() + { + // Create a separate data source to avoid breaking unrelated queries + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var largeString = new string('A', 1 << 29); + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT @a"; + var array = new[] + { + largeString, + largeString, + largeString, + largeString, + largeString, + largeString, + largeString, + largeString + }; + cmd.Parameters.AddWithValue("a", array); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + + [Test] 
+ public async Task Range_overflow_message_length_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync( + $"CREATE TYPE {type} AS (a text, b text, c text, d text, e text, f text, g text, h text);CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var largeString = new string('A', (1 << 28) + 2000000); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + var composite = new BigComposite + { + A = largeString, + B = largeString, + C = largeString, + D = largeString + }; + var range = new NpgsqlRange(composite, composite); + cmd.Parameters.Add(new NpgsqlParameter + { + Value = range, + ParameterName = "a", + DataTypeName = rangeType + }); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + + [Test] + public async Task Multirange_overflow_message_length_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + MinimumPgVersion(adminConnection, "14.0", "Multirange types were introduced in PostgreSQL 14"); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync( + $"CREATE TYPE {type} AS (a text, b text, c text, d text, e text, f text, g text, h text);CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await 
dataSource.OpenConnectionAsync(); + + var largeString = new string('A', (1 << 28) + 2000000); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + var composite = new BigComposite + { + A = largeString + }; + var range = new NpgsqlRange(composite, composite); + var multirange = new[] + { + range, + range, + range, + range + }; + cmd.Parameters.Add(new NpgsqlParameter + { + Value = multirange, + ParameterName = "a", + DataTypeName = rangeType + "_multirange" + }); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + [Test, Description("CreateCommand before connection open")] [IssueLink("https://github.com/npgsql/npgsql/issues/565")] public async Task Create_command_before_connection_open() @@ -1034,9 +1077,6 @@ public void Connection_not_open_throws() [Test] public async Task ExecuteNonQuery_Throws_PostgresException([Values] bool async) { - if (!async && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table1 = await CreateTempTable(conn, "id integer PRIMARY key, t varchar(40)"); @@ -1053,9 +1093,6 @@ public async Task ExecuteNonQuery_Throws_PostgresException([Values] bool async) [Test] public async Task ExecuteScalar_Throws_PostgresException([Values] bool async) { - if (!async && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table1 = await CreateTempTable(conn, "id integer PRIMARY key, t varchar(40)"); @@ -1072,9 +1109,6 @@ public async Task ExecuteScalar_Throws_PostgresException([Values] bool async) [Test] public async Task ExecuteReader_Throws_PostgresException([Values] bool async) { - if (!async && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table1 = await CreateTempTable(conn, "id integer PRIMARY key, t varchar(40)"); @@ -1087,10 +1121,10 @@ public async Task ExecuteReader_Throws_PostgresException([Values] bool async) ? 
await cmd.ExecuteReaderAsync() : cmd.ExecuteReader(); - Assert.IsTrue(async ? await reader.ReadAsync() : reader.Read()); + Assert.That(async ? await reader.ReadAsync() : reader.Read()); var value = reader.GetInt32(0); Assert.That(value, Is.EqualTo(1)); - Assert.IsFalse(async ? await reader.ReadAsync() : reader.Read()); + Assert.That(async ? await reader.ReadAsync() : reader.Read(), Is.False); var ex = async ? Assert.ThrowsAsync(async () => await reader.NextResultAsync()) : Assert.Throws(() => reader.NextResult()); @@ -1098,11 +1132,15 @@ public async Task ExecuteReader_Throws_PostgresException([Values] bool async) } [Test] - public void Command_is_recycled() + public void Command_is_recycled([Values] bool allResultTypesAreUnknown) { using var conn = OpenConnection(); var cmd1 = conn.CreateCommand(); cmd1.CommandText = "SELECT @p1"; + if (allResultTypesAreUnknown) + cmd1.AllResultTypesAreUnknown = true; + else + cmd1.UnknownResultTypeList = [true]; var tx = conn.BeginTransaction(); cmd1.Transaction = tx; cmd1.Parameters.AddWithValue("p1", 8); @@ -1115,6 +1153,8 @@ public void Command_is_recycled() Assert.That(cmd2.CommandType, Is.EqualTo(CommandType.Text)); Assert.That(cmd2.Transaction, Is.Null); Assert.That(cmd2.Parameters, Is.Empty); + Assert.That(cmd2.AllResultTypesAreUnknown, Is.False); + Assert.That(cmd2.UnknownResultTypeList, Is.Null); // TODO: Leaving this for now, since it'll be replaced by the new batching API // Assert.That(cmd2.Statements, Is.Empty); } @@ -1136,9 +1176,6 @@ public void Command_recycled_resets_CommandType() [IssueLink("https://github.com/npgsql/npgsql/issues/2795")] public async Task Many_parameters([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "some_column INT"); using var cmd = new NpgsqlCommand { Connection = conn }; @@ -1166,9 +1203,6 @@ public 
async Task Many_parameters([Values(PrepareOrNot.NotPrepared, PrepareOrNot [IssueLink("https://github.com/npgsql/npgsql/issues/2703")] public async Task Too_many_parameters_throws([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand { Connection = conn }; var sb = new StringBuilder("SOME RANDOM SQL "); @@ -1181,6 +1215,7 @@ public async Task Too_many_parameters_throws([Values(PrepareOrNot.NotPrepared, P sb.Append('@'); sb.Append(paramName); } + cmd.CommandText = sb.ToString(); if (prepare == PrepareOrNot.Prepared) @@ -1252,9 +1287,6 @@ public async Task Batched_big_statements_do_not_deadlock() [Test] public void Batched_small_then_big_statements_do_not_deadlock_in_sync_io() { - if (IsMultiplexing) - return; // Multiplexing, sync I/O - // This makes sure we switch to async writing for batches, starting from the 2nd statement at the latest. // Otherwise, a small first first statement followed by a huge big one could cause us to deadlock, as we're stuck // synchronously sending the 2nd statement while PG is stuck sending the results of the 1st. 
@@ -1293,16 +1325,13 @@ public async Task Same_command_different_param_instances() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3509"), Ignore("Flaky")] public async Task Bug3509() { - if (IsMultiplexing) - return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { KeepAlive = 1, }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString()); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmasterMock.WaitForServerConnection(); // Wait for a keepalive to arrive at the server, reply with an error await serverMock.WaitForData(); @@ -1342,9 +1371,6 @@ public async Task Cached_command_double_dispose() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4330")] public async Task Prepare_with_positional_placeholders_after_named() { - if (IsMultiplexing) - return; // Explicit preparation - await using var conn = await OpenConnectionAsync(); await using var command = new NpgsqlCommand("SELECT @p", conn); @@ -1362,12 +1388,9 @@ public async Task Prepare_with_positional_placeholders_after_named() [Description("Most of 08* errors are coming whenever there was an error while connecting to a remote server from a cluster, so the connection to the cluster is still OK")] public async Task Postgres_connection_errors_not_break_connection() { - if (IsMultiplexing) - return; - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await 
using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT 1"; @@ -1384,100 +1407,210 @@ await server Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); } - #region Logging - - [Test] - public async Task Log_ExecuteScalar_single_statement_without_parameters() + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4804")] + [Description("Concurrent write and read failure can lead to deadlocks while cleaning up the connector.")] + public async Task Concurrent_read_write_failure_deadlock() { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } + await using var cmd = conn.CreateCommand(); + // Attempt to send a big enough query to fill buffers + // That way the write side should be stuck, waiting for the server to empty buffers + cmd.CommandText = new string('a', 8_000_000); + var queryTask = cmd.ExecuteNonQueryAsync(); - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + var server = await postmasterMock.WaitForServerConnection(); + server.Close(); - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + Assert.ThrowsAsync(async () => await queryTask); } - [Test] - public async Task Log_ExecuteScalar_single_statement_with_positional_parameters() + [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/4906")] + [Description("Make sure we don't cancel a prepended query (and do not deadlock in case of a failure)")] + [Explicit("Flaky due to #5033")] + public async Task Not_cancel_prepended_query([Values] bool failPrependedQuery) { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + var csb = new NpgsqlConnectionStringBuilder(postmasterMock.ConnectionString) + { + NoResetOnClose = false + }; + await using var dataSource = CreateDataSource(csb.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); - cmd.Parameters.Add(new() { Value = 8 }); - cmd.Parameters.Add(new() { NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + // reopen connection to append prepended query + await conn.CloseAsync(); + await conn.OpenAsync(); - using (listLoggerProvider.Record()) + using var cts = new CancellationTokenSource(); + var queryTask = conn.ExecuteNonQueryAsync("SELECT 1", cancellationToken: cts.Token); + + var server = await postmasterMock.WaitForServerConnection(); + await server.ExpectSimpleQuery("DISCARD ALL"); + await server.ExpectExtendedQuery(); + + var cancelTask = Task.Run(cts.Cancel); + var cancellationRequestTask = postmasterMock.WaitForCancellationRequest().AsTask(); + // Give 1 second to make sure we didn't send cancellation request + await Task.Delay(1000); + Assert.That(cancelTask.IsCompleted, Is.False); + Assert.That(cancellationRequestTask.IsCompleted, Is.False); + + if (failPrependedQuery) { - await cmd.ExecuteScalarAsync(); + await server + .WriteErrorResponse(PostgresErrorCodes.SyntaxError) + .WriteReadyForQuery() + .FlushAsync(); + + await cancelTask; + await cancellationRequestTask; + + Assert.ThrowsAsync(async () => await queryTask); + Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + 
return; } - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") - .And.Contains("SELECT $1, $2") - .And.Contains("Parameters: [8, NULL]")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); - AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); + await server + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + await cancelTask; + await cancellationRequestTask; + + await server + .WriteErrorResponse(PostgresErrorCodes.QueryCanceled) + .WriteReadyForQuery() + .FlushAsync(); + + Assert.ThrowsAsync(async () => await queryTask); + + queryTask = conn.ExecuteNonQueryAsync("SELECT 1"); + await server.ExpectExtendedQuery(); + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteNoData() + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + await queryTask; } [Test] - public async Task Log_ExecuteScalar_single_statement_with_named_parameters() + public async Task Cancel_while_reading_from_long_running_query() { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); - cmd.Parameters.Add(new() { ParameterName = "p2", NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + await using var conn = await OpenConnectionAsync(); - using (listLoggerProvider.Record()) + await using var cmd = conn.CreateCommand(); + cmd.CommandText = """ +SELECT *, CASE WHEN "t"."i" = 50000 THEN pg_sleep(100) ELSE NULL END +FROM +( + SELECT generate_series(1, 1000000) AS "i" +) AS "t" +"""; + + using 
(var cts = new CancellationTokenSource()) + await using (var reader = await cmd.ExecuteReaderAsync(cts.Token)) { - await cmd.ExecuteScalarAsync(); + Assert.ThrowsAsync(async () => + { + var i = 0; + while (await reader.ReadAsync(cts.Token)) + { + i++; + if (i == 10) + cts.Cancel(); + } + }); } - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") - .And.Contains("SELECT $1, $2") - .And.Contains("Parameters: [8, NULL]")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); - AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); + cmd.CommandText = "SELECT 42"; + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(42)); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5218")] + [Description("Make sure we do not lose unread messages after resetting oversize buffer")] + public async Task Oversize_buffer_lost_messages() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + NoResetOnClose = true + }; + await using var mock = PgPostmasterMock.Start(csb.ConnectionString); + await using var dataSource = CreateDataSource(mock.ConnectionString); + await using var connection = await dataSource.OpenConnectionAsync(); + var connector = connection.Connector!; - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + var server = await mock.WaitForServerConnection(); + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(TextOid)) + .WriteDataRowWithFlush(Encoding.ASCII.GetBytes(new string('a', connection.Settings.ReadBufferSize * 2))); + // Just to make sure we have enough space + await server.FlushAsync(); + await server + .WriteDataRow("abc"u8.ToArray()) + .WriteCommandComplete() + .WriteReadyForQuery() + 
.WriteParameterStatus("SomeKey", "SomeValue") + .FlushAsync(); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT 1"; + await using (await cmd.ExecuteReaderAsync()) { } + + await connection.CloseAsync(); + await connection.OpenAsync(); + + Assert.That(connection.Connector, Is.SameAs(connector)); + // We'll get new value after the next query reads ParameterStatus from the buffer + Assert.That(connection.PostgresParameters, Does.Not.ContainKey("SomeKey").WithValue("SomeValue")); + + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(TextOid)) + .WriteDataRow("abc"u8.ToArray()) + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await cmd.ExecuteNonQueryAsync(); + + Assert.That(connection.PostgresParameters, Contains.Key("SomeKey").WithValue("SomeValue")); } [Test] - public async Task Log_ExecuteScalar_single_statement_with_parameter_logging_off() + public async Task Completed_transaction_throws([Values] bool commit) { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); - cmd.Parameters.Add(new() { Value = 8 }); - cmd.Parameters.Add(new() { Value = 9 }); + await using var conn = await OpenConnectionAsync(); + await using var tx = await conn.BeginTransactionAsync(); + await using var cmd = conn.CreateCommand(); - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } + if (commit) + await tx.CommitAsync(); + else + await tx.RollbackAsync(); - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains($"SELECT $1, $2")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, 
$2"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + Assert.Throws(() => cmd.Transaction = tx); } - #endregion Logging + [Test, Description("Writing to properties of a disposed command raises ObjectDisposedException.")] + public async Task Disposed_command_throws_on_assignment() + { + await using var conn = await OpenConnectionAsync(); + var command = new NpgsqlCommand("SELECT 1"); + command.Dispose(); + + Assert.Throws(() => command.Connection = conn); + Assert.Throws(() => command.CommandText = "SELECT 2"); - public CommandTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} + Assert.That(command.Connection, Is.Null); + Assert.That(command.CommandText, Is.EqualTo("SELECT 1")); + } } diff --git a/test/Npgsql.Tests/ConnectionStringBuilderTests.cs b/test/Npgsql.Tests/ConnectionStringBuilderTests.cs index 58c2ba4bd2..c95e83da16 100644 --- a/test/Npgsql.Tests/ConnectionStringBuilderTests.cs +++ b/test/Npgsql.Tests/ConnectionStringBuilderTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using NUnit.Framework; namespace Npgsql.Tests; @@ -20,15 +20,6 @@ public void Basic() Assert.That(builder.Count, Is.EqualTo(0)); } - [Test] - public void From_string() - { - var builder = new NpgsqlConnectionStringBuilder(); - builder.ConnectionString = "Host=myhost;EF Template Database=foo"; - Assert.That(builder.Host, Is.EqualTo("myhost")); - Assert.That(builder.EntityTemplateDatabase, Is.EqualTo("foo")); - } - [Test] public void TryGetValue() { diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index fd61b4426f..6b32630b9c 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -6,6 +6,7 @@ using System.Linq; using System.Net; using System.Net.Security; +using System.Net.Sockets; using System.Runtime.InteropServices; using System.Security.Cryptography.X509Certificates; using System.Text; @@ -13,7 +14,7 @@ using System.Threading.Tasks; using Npgsql.Internal; 
using Npgsql.PostgresTypes; -using Npgsql.Properties; +using Npgsql.Tests.Support; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; @@ -21,24 +22,22 @@ namespace Npgsql.Tests; -public class ConnectionTests : MultiplexingTestBase +public class ConnectionTests : TestBase { [Test, Description("Makes sure the connection goes through the proper state lifecycle")] public async Task Basic_lifecycle() { - using var conn = new NpgsqlConnection(ConnectionString); + await using var conn = CreateConnection(); var eventOpen = false; var eventClosed = false; conn.StateChange += (s, e) => { - if (e.OriginalState == ConnectionState.Closed && - e.CurrentState == ConnectionState.Open) + if (e is { OriginalState: ConnectionState.Closed, CurrentState: ConnectionState.Open }) eventOpen = true; - if (e.OriginalState == ConnectionState.Open && - e.CurrentState == ConnectionState.Closed) + if (e is { OriginalState: ConnectionState.Open, CurrentState: ConnectionState.Closed }) eventClosed = true; }; @@ -73,9 +72,6 @@ public async Task Basic_lifecycle() [Test, Description("Makes sure the connection goes through the proper state lifecycle")] public async Task Broken_lifecycle([Values] bool openFromClose) { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(); await using var conn = dataSource.CreateConnection(); @@ -84,12 +80,10 @@ public async Task Broken_lifecycle([Values] bool openFromClose) conn.StateChange += (s, e) => { - if (e.OriginalState == ConnectionState.Closed && - e.CurrentState == ConnectionState.Open) + if (e is { OriginalState: ConnectionState.Closed, CurrentState: ConnectionState.Open }) eventOpen = true; - if (e.OriginalState == ConnectionState.Open && - e.CurrentState == ConnectionState.Closed) + if (e is { OriginalState: ConnectionState.Open, CurrentState: ConnectionState.Closed }) eventClosed = true; }; @@ -116,7 +110,7 @@ public async Task Broken_lifecycle([Values] bool openFromClose) Assert.That(conn.State, 
Is.EqualTo(ConnectionState.Closed)); Assert.That(eventClosed, Is.True); Assert.That(conn.Connector is null); - Assert.AreEqual(0, conn.NpgsqlDataSource.Statistics.Total); + Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(0)); if (openFromClose) { @@ -128,18 +122,14 @@ public async Task Broken_lifecycle([Values] bool openFromClose) } Assert.DoesNotThrowAsync(conn.OpenAsync); - Assert.AreEqual(1, await conn.ExecuteScalarAsync("SELECT 1")); - Assert.AreEqual(1, conn.NpgsqlDataSource.Statistics.Total); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(1)); Assert.DoesNotThrowAsync(conn.CloseAsync); } [Test] - [Platform(Exclude = "MacOsX", Reason = "Flaky on MacOS")] public async Task Break_while_open() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); @@ -193,7 +183,6 @@ public async Task Connection_refused_async(bool pooled) #endif [Test] - [Ignore("Fails in a non-determinstic manner and only on the build server... 
investigate...")] public void Invalid_Username() { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) @@ -211,44 +200,35 @@ public void Invalid_Username() [Test] public void Bad_database() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Database = "does_not_exist" - }; - using (CreateTempPool(builder, out var connectionString)) - using (var conn = new NpgsqlConnection(connectionString)) - Assert.That(() => conn.Open(), - Throws.Exception.TypeOf() - .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.InvalidCatalogName) - ); + using var dataSource = CreateDataSource(csb => csb.Database = "does_not_exist"); + using var conn = dataSource.CreateConnection(); + + Assert.That(() => conn.Open(), + Throws.Exception.TypeOf() + .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.InvalidCatalogName) + ); } [Test, Description("Tests that mandatory connection string parameters are indeed mandatory")] public void Mandatory_connection_string_params() - => Assert.Throws(() => + => Assert.Throws(() => new NpgsqlConnection("User ID=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests")); [Test, Description("Reuses the same connection instance for a failed connection, then a successful one")] public async Task Fail_connect_then_succeed([Values] bool pooling) { - if (IsMultiplexing && !pooling) // Multiplexing doesn't work without pooling - return; - var dbName = GetUniqueIdentifier(nameof(Fail_connect_then_succeed)); await using var conn1 = await OpenConnectionAsync(); await conn1.ExecuteNonQueryAsync($"DROP DATABASE IF EXISTS \"{dbName}\""); try { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - Database = dbName, - Pooling = pooling - }; - - // Create a temp pool to allow us to drop database at the end of the test - using var _ = CreateTempPool(csb, out var connString); + csb.Database = dbName; + csb.Pooling 
= pooling; + }); - await using var conn2 = new NpgsqlConnection(connString); + await using var conn2 = dataSource.CreateConnection(); var pgEx = Assert.ThrowsAsync(conn2.OpenAsync)!; Assert.That(pgEx.SqlState, Is.EqualTo(PostgresErrorCodes.InvalidCatalogName)); // database doesn't exist Assert.That(conn2.FullState, Is.EqualTo(ConnectionState.Closed)); @@ -267,6 +247,8 @@ public async Task Fail_connect_then_succeed([Values] bool pooling) [Test] public void Open_timeout_unknown_ip([Values(true, false)] bool async) { + const int timeoutSeconds = 2; + var unknownIp = Environment.GetEnvironmentVariable("NPGSQL_UNKNOWN_IP"); if (unknownIp is null) { @@ -274,13 +256,12 @@ public void Open_timeout_unknown_ip([Values(true, false)] bool async) return; } - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Host = unknownIp, - Timeout = 2 - }; - using var _ = CreateTempPool(csb, out var connString); - using var conn = new NpgsqlConnection(connString); + csb.Host = unknownIp; + csb.Timeout = timeoutSeconds; + }); + using var conn = dataSource.CreateConnection(); var sw = Stopwatch.StartNew(); if (async) @@ -296,8 +277,8 @@ public void Open_timeout_unknown_ip([Values(true, false)] bool async) .With.InnerException.TypeOf()); } - Assert.That(sw.Elapsed.TotalMilliseconds, Is.GreaterThanOrEqualTo((csb.Timeout * 1000) - 100), - $"Timeout was supposed to happen after {csb.Timeout} seconds, but fired after {sw.Elapsed.TotalSeconds}"); + Assert.That(sw.Elapsed.TotalMilliseconds, Is.GreaterThanOrEqualTo(timeoutSeconds * 1000 - 100), + $"Timeout was supposed to happen after {timeoutSeconds} seconds, but fired after {sw.Elapsed.TotalSeconds}"); Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); } @@ -323,6 +304,38 @@ public void Connect_timeout_cancel() Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); } + [Test] + public void Bad_hostname() + { + using var dataSource = CreateDataSource(csb => csb.Host = 
"hostname.invalid"); + using var conn = dataSource.CreateConnection(); + + Assert.That( + () => conn.Open(), + Throws.Exception + .TypeOf() + .With + .Property(nameof(NpgsqlException.InnerException)) + .TypeOf() + ); + } + + [Test] + public void Bad_hostname_async() + { + using var dataSource = CreateDataSource(csb => csb.Host = "hostname.invalid"); + using var conn = dataSource.CreateConnection(); + + Assert.That( + async () => await conn.OpenAsync(), + Throws.Exception + .TypeOf() + .With + .Property(nameof(NpgsqlException.InnerException)) + .TypeOf() + ); + } + #endregion #region Client Encoding @@ -344,10 +357,8 @@ public async Task Client_encoding_env_var() // Note that the pool is unaware of the environment variable, so if a connection is // returned from the pool it may contain the wrong client_encoding using var _ = SetEnvironmentVariable("PGCLIENTENCODING", "SQL_ASCII"); - using var __ = CreateTempPool(ConnectionString, out var connectionString); - - var connString = new NpgsqlConnectionStringBuilder(connectionString); - using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW client_encoding"), Is.EqualTo("SQL_ASCII")); } @@ -356,8 +367,8 @@ public async Task Client_encoding_connection_param() { using (var conn = await OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SHOW client_encoding"), Is.Not.EqualTo("SQL_ASCII")); - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { ClientEncoding = "SQL_ASCII" }; - using (var conn = await OpenConnectionAsync(connString)) + await using var dataSource = CreateDataSource(csb => csb.ClientEncoding = "SQL_ASCII"); + using (var conn = await dataSource.OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SHOW client_encoding"), Is.EqualTo("SQL_ASCII")); } @@ -380,8 +391,8 @@ public async Task Timezone_env_var() 
// Note that the pool is unaware of the environment variable, so if a connection is // returned from the pool it may contain the wrong timezone using var _ = SetEnvironmentVariable("PGTZ", newTimezone); - using var __ = CreateTempPool(ConnectionString, out var connectionString); - using var conn2 = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + using var conn2 = await dataSource.OpenConnectionAsync(); Assert.That(await conn2.ExecuteScalarAsync("SHOW TIMEZONE"), Is.EqualTo(newTimezone)); } @@ -396,17 +407,54 @@ public async Task Timezone_connection_param() : "Africa/Bamako"; } - var _ = CreateTempPool(ConnectionString, out var connString); - var builder = new NpgsqlConnectionStringBuilder(connString) - { - Timezone = newTimezone - }; - using (var conn = await OpenConnectionAsync(builder.ConnectionString)) + await using var dataSource = CreateDataSource(csb => csb.Timezone = newTimezone); + using (var conn = await dataSource.OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SHOW TIMEZONE"), Is.EqualTo(newTimezone)); } #endregion Timezone + #region Application Name + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6133")] + [NonParallelizable] // Sets environment variable + public async Task Application_name_env_var() + { + const string testAppName = "MyTestApp"; + + // Note that the pool is unaware of the environment variable, so if a connection is + // returned from the pool it may contain the wrong application name + using var _ = SetEnvironmentVariable("PGAPPNAME", testAppName); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(testAppName)); + } + + [Test] + public async Task Application_name_connection_param() + { + const string testAppName = "MyTestApp2"; + + await using var dataSource = CreateDataSource(csb => csb.ApplicationName = testAppName); + 
await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(testAppName)); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Application_name_connection_param_overrides_env_var() + { + const string envAppName = "EnvApp"; + const string connAppName = "ConnApp"; + + using var _ = SetEnvironmentVariable("PGAPPNAME", envAppName); + await using var dataSource = CreateDataSource(csb => csb.ApplicationName = connAppName); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(connAppName)); + } + + #endregion Application Name + #region ConnectionString - Host [TestCase("127.0.0.1", ExpectedResult = new [] { "127.0.0.1:5432" })] @@ -427,7 +475,7 @@ public async Task Timezone_connection_param() "localhost:5432", "localhost:5432" })] - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3802"), NonParallelizable] + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3802")] public string[] ConnectionString_Host(string host) { var dataSourceBuilder = new NpgsqlDataSourceBuilder @@ -463,17 +511,13 @@ public async Task Unix_domain_socket() return; } - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Host = dir - }; - try { - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Host = dir); + await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1", tx), Is.EqualTo(1)); - Assert.That(conn.DataSource, Is.EqualTo(Path.Combine(csb.Host, $".s.PGSQL.{port}"))); + Assert.That(conn.DataSource, Is.EqualTo(Path.Combine(dir, $".s.PGSQL.{port}"))); } catch (Exception ex) { @@ -502,7 +546,8 @@ public async Task Unix_abstract_domain_socket() try { - await using var conn = await 
OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb.ToString()); + await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1", tx), Is.EqualTo(1)); Assert.That(conn.DataSource, Is.EqualTo(Path.Combine(csb.Host, $".s.PGSQL.{csb.Port}"))); @@ -514,17 +559,19 @@ public async Task Unix_abstract_domain_socket() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/903")] - public async Task DataSource_property() + public void DataSource_property() { using var conn = new NpgsqlConnection(); Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); - conn.ConnectionString = ConnectionString; - Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); + var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - await conn.OpenAsync(); - await using var _ = await conn.BeginTransactionAsync(); - Assert.That(conn.DataSource, Is.EqualTo($"tcp://{conn.Host}:{conn.Port}")); + conn.ConnectionString = csb.ConnectionString; + Assert.That(conn.DataSource, Is.EqualTo($"tcp://{csb.Host}:{csb.Port}")); + + csb.Host = "127.0.0.1, 127.0.0.2"; + conn.ConnectionString = csb.ConnectionString; + Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); } #region Server version @@ -665,6 +712,83 @@ public void Set_connection_string_to_empty() Assert.That(() => conn.Open(), Throws.Exception.TypeOf()); } + [Test] + [TestCase("test_schema_1", "public", true)] + [TestCase("test_schema_1", "test_schema_2", true)] + [TestCase("test_schema_2", "test_schema_3", true)] + [TestCase("test_schema_1", "public", false)] + [TestCase("test_schema_1", "test_schema_2", false)] + [TestCase("test_schema_2", "test_schema_3", false)] + [TestCase("'DROP TABLE X", "'COMMIT; ", false)] + [Parallelizable(ParallelScope.None)] + public async Task Set_Schemas_And_Load_Relevant_Types(string testSchema, string otherSchema, bool enabled) + { + await using var conn1 = await 
OpenConnectionAsync(); + try + { + await conn1.ExecuteNonQueryAsync("DROP TYPE IF EXISTS public.test_type_1"); + await conn1.ExecuteNonQueryAsync("DROP TYPE IF EXISTS public.test_type_2"); + await conn1.ExecuteNonQueryAsync("DROP TYPE IF EXISTS public.test_type_3"); + await conn1.ExecuteNonQueryAsync("CREATE TYPE public.test_type_3 AS (id int, name text)"); + + if (testSchema != "public") + { + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{testSchema}\" CASCADE"); + await conn1.ExecuteNonQueryAsync($"CREATE SCHEMA \"{testSchema}\""); + } + + if (otherSchema != "public") + { + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{otherSchema}\" CASCADE"); + await conn1.ExecuteNonQueryAsync($"CREATE SCHEMA \"{otherSchema}\""); + } + + await conn1.ExecuteNonQueryAsync($"DROP TYPE IF EXISTS \"{testSchema}\".test_type_1"); + await conn1.ExecuteNonQueryAsync($"CREATE TYPE \"{testSchema}\".test_type_1 AS (id int)"); + await conn1.ExecuteNonQueryAsync($"DROP TYPE IF EXISTS \"{otherSchema}\".test_type_2"); + await conn1.ExecuteNonQueryAsync($"CREATE TYPE \"{otherSchema}\".test_type_2 AS (id int, name text)"); + + using var dataSource = CreateDataSource(builder => + { + builder.ConfigureTypeLoading(builder => + { + if (enabled) + builder.SetTypeLoadingSchemas(testSchema, otherSchema); + }); + }); + using var conn = await dataSource.OpenConnectionAsync(); + var databaseInfo = dataSource.CurrentReloadableState.DatabaseInfo; + if (enabled) + { + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + if (testSchema == "public" || otherSchema == "public") + { + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + } + else + { + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3"), Is.False); + } + } + else + { + 
Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + } + } + finally + { + if (testSchema != "public") + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{testSchema}\" CASCADE"); + if (otherSchema != "public") + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{otherSchema}\" CASCADE"); + } + + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/703")] public async Task No_database_defaults_to_username() { @@ -677,14 +801,10 @@ public async Task No_database_defaults_to_username() } [Test, Description("Breaks a connector while it's in the pool, with a keepalive and without")] - [Platform(Exclude = "MacOsX", Reason = "Fails only on mac, needs to be investigated")] [TestCase(false, TestName = nameof(Break_connector_in_pool) + "_without_keep_alive")] [TestCase(true, TestName = nameof(Break_connector_in_pool) + "_with_keep_alive")] public async Task Break_connector_in_pool(bool keepAlive) { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, hanging"); - var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.ConnectionStringBuilder.MaxPoolSize = 1; if (keepAlive) @@ -721,25 +841,13 @@ public async Task Break_connector_in_pool(bool keepAlive) [IssueLink("https://github.com/npgsql/npgsql/issues/4603")] public async Task Reload_types_keepalive_concurrent() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing doesn't support keepalive"); - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - KeepAlive = 1, - }; - using var _ = CreateTempPool(csb, out var connString); - - await using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(csb => csb.KeepAlive = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var startTimestamp = Stopwatch.GetTimestamp(); // Give a few 
seconds for a KeepAlive to possibly perform - while (GetElapsedTime(startTimestamp).TotalSeconds < 2) + while (Stopwatch.GetElapsedTime(startTimestamp).TotalSeconds < 2) Assert.DoesNotThrow(conn.ReloadTypes); - - // dotnet 3.1 doesn't have Stopwatch.GetElapsedTime method. - static TimeSpan GetElapsedTime(long startingTimestamp) => - new((long)((Stopwatch.GetTimestamp() - startingTimestamp) * ((double)10000000 / Stopwatch.Frequency))); } #region ChangeDatabase @@ -782,17 +890,10 @@ public void ChangeDatabase_connection_on_closed_connection_throws() [Test, Description("Tests closing a connector while a reader is open")] public async Task Close_during_read([Values(PooledOrNot.Pooled, PooledOrNot.Unpooled)] PooledOrNot pooled) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - if (pooled == PooledOrNot.Unpooled) - { - if (IsMultiplexing) - return; // Multiplexing requires pooling - csb.Pooling = false; - } - - using var conn = await OpenConnectionAsync(csb); - using (var cmd = new NpgsqlCommand("SELECT 1", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using var dataSource = CreateDataSource(csb => csb.Pooling = pooled == PooledOrNot.Pooled); + await using var conn = await dataSource.OpenConnectionAsync(); + await using (var cmd = new NpgsqlCommand("SELECT 1", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); conn.Close(); @@ -808,19 +909,18 @@ public async Task Close_during_read([Values(PooledOrNot.Pooled, PooledOrNot.Unpo [Test] public async Task Search_path() { - using var conn = await OpenConnectionAsync(new NpgsqlConnectionStringBuilder(ConnectionString) { SearchPath = "foo" }); + await using var dataSource = CreateDataSource(csb => csb.SearchPath = "foo"); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW search_path"), Contains.Substring("foo")); } [Test] public async Task Set_options() { - using var _ = CreateTempPool(new 
NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c default_transaction_isolation=serializable -c default_transaction_deferrable=on -c foo.bar=My\\ Famous\\\\Thing" - }, out var connectionString); - - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => + csb.Options = + "-c default_transaction_isolation=serializable -c default_transaction_deferrable=on -c foo.bar=My\\ Famous\\\\Thing"); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_isolation"), Is.EqualTo("serializable")); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_deferrable"), Is.EqualTo("on")); @@ -835,10 +935,9 @@ public async Task Connector_not_initialized_exception() for (var i = 0; i < 2; i++) { - using var connection = new NpgsqlConnection(ConnectionString); - connection.Open(); + await using var connection = await OpenConnectionAsync(); command.Connection = connection; - var tx = connection.BeginTransaction(); + await using var tx = await connection.BeginTransactionAsync(); await command.ExecuteScalarAsync(); await tx.CommitAsync(); } @@ -853,7 +952,7 @@ public void Bug1011001() var cs1 = csb1.ToString(); var csb2 = new NpgsqlConnectionStringBuilder(cs1); var cs2 = csb2.ToString(); - Assert.IsTrue(cs1 == cs2); + Assert.That(cs1 == cs2); } [Test, IssueLink("https://github.com/npgsql/npgsql/pull/164")] @@ -861,7 +960,7 @@ public void Connection_State_is_Closed_when_disposed() { var c = new NpgsqlConnection(); c.Dispose(); - Assert.AreEqual(ConnectionState.Closed, c.State); + Assert.That(c.State, Is.EqualTo(ConnectionState.Closed)); } [Test] @@ -875,11 +974,9 @@ public void Change_ApplicationName_with_connection_string_builder() [Test, Description("Makes sure notices are probably received and emitted as events")] public async Task Notice() { - await using var conn = await OpenConnectionAsync(new 
NpgsqlConnectionStringBuilder(ConnectionString) - { - // Make sure messages are in English - Options = "-c lc_messages=en_US.UTF-8" - }); + // Make sure messages are in English + await using var dataSource = CreateDataSource(csb => csb.Options = "-c lc_messages=en_US.UTF-8"); + await using var conn = await dataSource.OpenConnectionAsync(); var function = await GetTempFunctionName(conn); await conn.ExecuteNonQueryAsync($@" CREATE OR REPLACE FUNCTION {function}() RETURNS VOID AS @@ -912,8 +1009,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, Description("Makes sure that concurrent use of the connection throws an exception")] public async Task Concurrent_use_throws() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); using (var cmd = new NpgsqlCommand("SELECT 1", conn)) using (await cmd.ExecuteReaderAsync()) @@ -936,9 +1031,6 @@ public async Task Concurrent_use_throws() [IssueLink("https://github.com/npgsql/npgsql/issues/783")] public void PersistSecurityInfo_is_true([Values(true, false)] bool pooling) { - if (IsMultiplexing && !pooling) - return; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { PersistSecurityInfo = true, @@ -955,9 +1047,6 @@ public void PersistSecurityInfo_is_true([Values(true, false)] bool pooling) [IssueLink("https://github.com/npgsql/npgsql/issues/783")] public void No_password_without_PersistSecurityInfo([Values(true, false)] bool pooling) { - if (IsMultiplexing && !pooling) - return; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { Pooling = pooling @@ -971,7 +1060,7 @@ public void No_password_without_PersistSecurityInfo([Values(true, false)] bool p } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2725")] - public void Clone_with_PersistSecurityInfo() + public async Task Clone_with_PersistSecurityInfo([Values] bool async) { var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { @@ -984,20 +1073,24 @@ public void 
Clone_with_PersistSecurityInfo() // First un-persist, should work builder.PersistSecurityInfo = false; var connStringWithoutPersist = builder.ToString(); - using var clonedWithoutPersist = connWithPersist.CloneWith(connStringWithoutPersist); + using var clonedWithoutPersist = async + ? await connWithPersist.CloneWithAsync(connStringWithoutPersist) + : connWithPersist.CloneWith(connStringWithoutPersist); clonedWithoutPersist.Open(); Assert.That(clonedWithoutPersist.ConnectionString, Does.Not.Contain("Password=")); // Then attempt to re-persist, should not work - using var clonedConn = clonedWithoutPersist.CloneWith(connStringWithPersist); + using var clonedConn = async + ? await clonedWithoutPersist.CloneWithAsync(connStringWithPersist) + : clonedWithoutPersist.CloneWith(connStringWithPersist); clonedConn.Open(); Assert.That(clonedConn.ConnectionString, Does.Not.Contain("Password=")); } [Test] - public async Task CloneWith_and_data_source_with_password() + public async Task CloneWith_and_data_source_with_password([Values] bool async) { var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString); // Set the password via the data source property later to make sure that's picked up by CloneWith @@ -1010,33 +1103,41 @@ public async Task CloneWith_and_data_source_with_password() // Test that the up-to-date password gets copied to the clone, as if we opened the original connection instead of cloning it using var _ = CreateTempPool(new NpgsqlConnectionStringBuilder(ConnectionString) { Password = null }, out var tempConnectionString); - await using var clonedConnection = connection.CloneWith(tempConnectionString); + await using var clonedConnection = async + ? 
await connection.CloneWithAsync(tempConnectionString) + : connection.CloneWith(tempConnectionString); await clonedConnection.OpenAsync(); } [Test] - public async Task CloneWith_and_data_source_with_auth_callbacks() + public async Task CloneWith_and_data_source_with_auth_callbacks([Values] bool async) { var (userCertificateValidationCallbackCalled, clientCertificatesCallbackCalled) = (false, false); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseUserCertificateValidationCallback(UserCertificateValidationCallback); - dataSourceBuilder.UseClientCertificatesCallback(ClientCertificatesCallback); + dataSourceBuilder.UseSslClientAuthenticationOptionsCallback(options => + { + ClientCertificatesCallback(options.ClientCertificates); + options.RemoteCertificateValidationCallback = UserCertificateValidationCallback; + }); await using var dataSource = dataSourceBuilder.Build(); await using var connection = dataSource.CreateConnection(); using var _ = CreateTempPool(ConnectionString, out var tempConnectionString); - await using var clonedConnection = connection.CloneWith(tempConnectionString); + await using var clonedConnection = async + ? await connection.CloneWithAsync(tempConnectionString) + : connection.CloneWith(tempConnectionString); - clonedConnection.UserCertificateValidationCallback!(null!, null, null, SslPolicyErrors.None); - Assert.True(userCertificateValidationCallbackCalled); - clonedConnection.ProvideClientCertificatesCallback!(null!); - Assert.True(clientCertificatesCallbackCalled); + var sslClientAuthenticationOptions = new SslClientAuthenticationOptions(); + clonedConnection.SslClientAuthenticationOptionsCallback!(sslClientAuthenticationOptions); + Assert.That(clientCertificatesCallbackCalled); + sslClientAuthenticationOptions.RemoteCertificateValidationCallback!(null!, null, null, SslPolicyErrors.None); + Assert.That(userCertificateValidationCallbackCalled); bool UserCertificateValidationCallback(object sender, X509Certificate? 
certificate, X509Chain? chain, SslPolicyErrors errors) => userCertificateValidationCallbackCalled = true; - void ClientCertificatesCallback(X509CertificateCollection certs) + void ClientCertificatesCallback(X509CertificateCollection? certs) => clientCertificatesCallbackCalled = true; } @@ -1049,18 +1150,15 @@ public void Clone() { using var pool = CreateTempPool(ConnectionString, out var connectionString); using var conn = new NpgsqlConnection(connectionString); - ProvideClientCertificatesCallback callback1 = certificates => { }; - conn.ProvideClientCertificatesCallback = callback1; - RemoteCertificateValidationCallback callback2 = (sender, certificate, chain, errors) => true; - conn.UserCertificateValidationCallback = callback2; + Action callback = _ => { }; + conn.SslClientAuthenticationOptionsCallback = callback; conn.Open(); Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); using var conn2 = (NpgsqlConnection)((ICloneable)conn).Clone(); Assert.That(conn2.ConnectionString, Is.EqualTo(conn.ConnectionString)); - Assert.That(conn2.ProvideClientCertificatesCallback, Is.SameAs(callback1)); - Assert.That(conn2.UserCertificateValidationCallback, Is.SameAs(callback2)); + Assert.That(conn2.SslClientAuthenticationOptionsCallback, Is.SameAs(callback)); conn2.Open(); Assert.That(async () => await conn2.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } @@ -1068,44 +1166,41 @@ public void Clone() [Test] public async Task Clone_with_data_source() { - await using var connection = await SharedDataSource.OpenConnectionAsync(); + await using var connection = await DataSource.OpenConnectionAsync(); await using var clonedConnection = (NpgsqlConnection)((ICloneable)connection).Clone(); - Assert.That(clonedConnection.NpgsqlDataSource, Is.SameAs(SharedDataSource)); + Assert.That(clonedConnection.NpgsqlDataSource, Is.SameAs(DataSource)); Assert.DoesNotThrowAsync(() => clonedConnection.OpenAsync()); } [Test] - [NonParallelizable] // Anyone can reload 
DatabaseInfo between us opening a connection public async Task DatabaseInfo_is_shared() { - if (IsMultiplexing) - return; // Create a temp pool to make sure the second connection will be new and not idle - using var _ = CreateTempPool(ConnectionString, out var connString); - using var conn1 = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn1 = await dataSource.OpenConnectionAsync(); // Call RealoadTypes to force reload DatabaseInfo conn1.ReloadTypes(); - using var conn2 = await OpenConnectionAsync(connString); + await using var conn2 = await dataSource.OpenConnectionAsync(); Assert.That(conn1.Connector!.DatabaseInfo, Is.SameAs(conn2.Connector!.DatabaseInfo)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/736")] public async Task ManyOpenClose() { + await using var dataSource = CreateDataSource(); // The connector's _sentRfqPrependedMessages is a byte, too many open/closes made it overflow for (var i = 0; i < 255; i++) { - using var conn = new NpgsqlConnection(ConnectionString); - conn.Open(); + await using var conn = await dataSource.OpenConnectionAsync(); } - using (var conn = new NpgsqlConnection(ConnectionString)) + await using (var conn = dataSource.CreateConnection()) { - conn.Open(); + await conn.OpenAsync(); } - using (var conn = new NpgsqlConnection(ConnectionString)) + await using (var conn = dataSource.CreateConnection()) { - conn.Open(); + await conn.OpenAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } } @@ -1113,38 +1208,37 @@ public async Task ManyOpenClose() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/736")] public async Task Many_open_close_with_transaction() { + await using var dataSource = CreateDataSource(); // The connector's _sentRfqPrependedMessages is a byte, too many open/closes made it overflow for (var i = 0; i < 255; i++) { - using var conn = await OpenConnectionAsync(); - conn.BeginTransaction(); + await using var 
conn = await dataSource.OpenConnectionAsync(); + await conn.BeginTransactionAsync(); } - using (var conn = await OpenConnectionAsync()) + await using (var conn = await dataSource.OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/927")] [IssueLink("https://github.com/npgsql/npgsql/issues/736")] - [Ignore("Fails when running the entire test suite but not on its own...")] public async Task Rollback_on_close() { // Npgsql 3.0.0 to 3.0.4 prepended a rollback for the next time the connector is used, as an optimization. // This caused some issues (#927) and was removed. - // Clear connections in pool as we're going to need to reopen the same connection - var dummyConn = new NpgsqlConnection(ConnectionString); - NpgsqlConnection.ClearPool(dummyConn); + await using var dataSource = CreateDataSource(); int processId; - using (var conn = await OpenConnectionAsync()) + await using (var conn = await dataSource.OpenConnectionAsync()) { processId = conn.Connector!.BackendProcessId; - conn.BeginTransaction(); + await conn.BeginTransactionAsync(); await conn.ExecuteNonQueryAsync("SELECT 1"); Assert.That(conn.Connector.TransactionStatus, Is.EqualTo(TransactionStatus.InTransactionBlock)); } - using (var conn = await OpenConnectionAsync()) + + await using (var conn = await dataSource.OpenConnectionAsync()) { Assert.That(conn.Connector!.BackendProcessId, Is.EqualTo(processId)); Assert.That(conn.Connector.TransactionStatus, Is.EqualTo(TransactionStatus.Idle)); @@ -1153,17 +1247,14 @@ public async Task Rollback_on_close() [Test, Description("Tests an exception happening when sending the Terminate message while closing a ready connector")] [IssueLink("https://github.com/npgsql/npgsql/issues/777")] - [Ignore("Flaky")] public async Task Exception_during_close() { - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.Pooling = false; - await 
using var dataSource = dataSourceBuilder.Build(); - using var conn = await dataSource.OpenConnectionAsync(); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); var connectorId = conn.ProcessID; using (var conn2 = await OpenConnectionAsync()) - conn2.ExecuteNonQuery($"SELECT pg_terminate_backend({connectorId})"); + await conn2.ExecuteNonQueryAsync($"SELECT pg_terminate_backend({connectorId})"); conn.Close(); } @@ -1171,13 +1262,8 @@ public async Task Exception_during_close() [Test, Description("Some pseudo-PG database don't support pg_type loading, we have a minimal DatabaseInfo for this")] public async Task NoTypeLoading() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading - }; - - using var _ = CreateTempPool(builder, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(builder => builder.ConfigureTypeLoading(builder => builder.EnableTypeLoading())); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 8"), Is.EqualTo(8)); Assert.That(await conn.ExecuteScalarAsync("SELECT 'foo'"), Is.EqualTo("foo")); @@ -1202,17 +1288,14 @@ public async Task NoTypeLoading() }; Assert.That(async () => await cmd.ExecuteScalarAsync(), - Throws.Exception.TypeOf() - .With.Message.EqualTo(string.Format(NpgsqlStrings.NoMultirangeTypeFound, "integer"))); + Throws.Exception.TypeOf() + .With.Message.EqualTo("The NpgsqlDbType 'IntegerMultirange' isn't present in your database. 
You may need to install an extension or upgrade to a newer version.")); } } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1158")] public async Task Table_named_record() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - using var conn = await OpenConnectionAsync(); await conn.ExecuteNonQueryAsync(@" @@ -1230,28 +1313,32 @@ await conn.ExecuteNonQueryAsync(@" } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/392")] - [NonParallelizable] - [Platform(Exclude = "MacOsX", Reason = "Flaky in CI on Mac")] + [NonParallelizable] // Drops and creates same database across modes public async Task Non_UTF8_Encoding() { Encoding.RegisterProvider(CodePagesEncodingProvider.Instance); await using var adminConn = await OpenConnectionAsync(); + // Create the database with server encoding sql-ascii + // Starting with PG16, the default locale provider is icu, which does not support encoding sql_ascii. Specify libc explicitly as the + // locale provider (except for older versions where specifying explicitly isn't supported, and libc is the only possibility). await adminConn.ExecuteNonQueryAsync("DROP DATABASE IF EXISTS sqlascii"); - await adminConn.ExecuteNonQueryAsync("CREATE DATABASE sqlascii ENCODING 'sql_ascii' TEMPLATE template0"); + await adminConn.ExecuteNonQueryAsync( + adminConn.PostgreSqlVersion >= new Version(15, 0) + ? 
"CREATE DATABASE sqlascii ENCODING 'sql_ascii' LOCALE_PROVIDER libc TEMPLATE template0" + : "CREATE DATABASE sqlascii ENCODING 'sql_ascii' TEMPLATE template0"); + try { // Insert some win1252 data - var goodBuilder = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var goodDataSource = CreateDataSource(csb => { - Database = "sqlascii", - Encoding = "windows-1252", - ClientEncoding = "sql-ascii", - }; - - using var _ = CreateTempPool(goodBuilder, out var goodConnectionString); + csb.Database = "sqlascii"; + csb.Encoding = "windows-1252"; + csb.ClientEncoding = "sql-ascii"; + }); - await using (var conn = await OpenConnectionAsync(goodConnectionString)) + await using (var conn = await goodDataSource.OpenConnectionAsync()) { const string value = "éàç"; await conn.ExecuteNonQueryAsync("CREATE TABLE foo (bar TEXT)"); @@ -1260,7 +1347,7 @@ public async Task Non_UTF8_Encoding() await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT * FROM foo"; await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); using (var textReader = await reader.GetTextReaderAsync(0)) Assert.That(textReader.ReadToEnd(), Is.EqualTo(value)); @@ -1268,12 +1355,8 @@ public async Task Non_UTF8_Encoding() } // A normal connection with the default UTF8 encoding and client_encoding should fail - var badBuilder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Database = "sqlascii", - }; - using var __ = CreateTempPool(badBuilder, out var badConnectionString); - await using (var conn = await OpenConnectionAsync(badConnectionString)) + await using var badDataSource = CreateDataSource(csb => csb.Database = "sqlascii"); + await using (var conn = await badDataSource.OpenConnectionAsync()) { Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM foo"), Throws.Exception.TypeOf() @@ -1291,43 +1374,38 @@ public async Task Non_UTF8_Encoding() [Test] public async Task 
Oversize_buffer() { - if (IsMultiplexing) - return; - - using (CreateTempPool(ConnectionString, out var connectionString)) - using (var conn = await OpenConnectionAsync(connectionString)) - { - var csb = new NpgsqlConnectionStringBuilder(connectionString); - - Assert.That(conn.Connector!.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - // Read a big row, we should now be using an oversize buffer - var bigString1 = new string('x', conn.Connector.ReadBuffer.Size + 1); - using (var cmd = new NpgsqlCommand($"SELECT '{bigString1}'", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetString(0), Is.EqualTo(bigString1)); - } - var size1 = conn.Connector.ReadBuffer.Size; - Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(csb.ReadBufferSize)); + Assert.That(conn.Connector!.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); - // Even bigger oversize buffer - var bigString2 = new string('x', conn.Connector.ReadBuffer.Size + 1); - using (var cmd = new NpgsqlCommand($"SELECT '{bigString2}'", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetString(0), Is.EqualTo(bigString2)); - } - Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(size1)); + // Read a big row, we should now be using an oversize buffer + var bigString1 = new string('x', conn.Connector.ReadBuffer.Size + 1); + using (var cmd = new NpgsqlCommand($"SELECT '{bigString1}'", conn)) + using (var reader = await cmd.ExecuteReaderAsync()) + { + reader.Read(); + Assert.That(reader.GetString(0), Is.EqualTo(bigString1)); + } + var size1 = conn.Connector.ReadBuffer.Size; + Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(csb.ReadBufferSize)); - var processId = conn.ProcessID; - conn.Close(); - conn.Open(); - 
Assert.That(conn.ProcessID, Is.EqualTo(processId)); - Assert.That(conn.Connector.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); + // Even bigger oversize buffer + var bigString2 = new string('x', conn.Connector.ReadBuffer.Size + 1); + using (var cmd = new NpgsqlCommand($"SELECT '{bigString2}'", conn)) + using (var reader = await cmd.ExecuteReaderAsync()) + { + reader.Read(); + Assert.That(reader.GetString(0), Is.EqualTo(bigString2)); } + Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(size1)); + + var processId = conn.ProcessID; + conn.Close(); + conn.Open(); + Assert.That(conn.ProcessID, Is.EqualTo(processId)); + Assert.That(conn.Connector.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); } #region Keepalive @@ -1335,46 +1413,34 @@ public async Task Oversize_buffer() [Test, Explicit, Description("Turns on TCP keepalive and sleeps forever, good for wiresharking")] public async Task TcpKeepaliveTime() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - TcpKeepAliveTime = 2 - }; - using (await OpenConnectionAsync(csb)) + await using var dataSource = CreateDataSource(csb => csb.TcpKeepAliveTime = 2); + using (await dataSource.OpenConnectionAsync()) Thread.Sleep(Timeout.Infinite); } [Test, Explicit, Description("Turns on TCP keepalive and sleeps forever, good for wiresharking")] public async Task TcpKeepalive() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - TcpKeepAlive = true - }; - using (await OpenConnectionAsync(csb)) + await using var dataSource = CreateDataSource(csb => csb.TcpKeepAlive = true); + await using (await dataSource.OpenConnectionAsync()) Thread.Sleep(Timeout.Infinite); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3511")] public async Task Keepalive_with_failed_transaction() { - if (IsMultiplexing) - return; - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - KeepAlive = 1 - }; - using var conn = await OpenConnectionAsync(csb); - using var tx = await 
conn.BeginTransactionAsync(); + await using var dataSource = CreateDataSource(csb => csb.KeepAlive = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var tx = await conn.BeginTransactionAsync(); - Assert.Throws(() => conn.ExecuteScalar("SELECT non_existent_table")); + Assert.ThrowsAsync(async () => await conn.ExecuteScalarAsync("SELECT non_existent_table")); // Connection is now in a failed transaction state. Wait a bit to allow for the keepalive to execute. Thread.Sleep(3000); await tx.RollbackAsync(); // Confirm that the connection is still open and usable - Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } #endregion Keepalive @@ -1382,9 +1448,6 @@ public async Task Keepalive_with_failed_transaction() [Test] public async Task Change_parameter() { - if (IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var defaultApplicationName = conn.PostgresParameters["application_name"]; await conn.ExecuteNonQueryAsync("SET application_name = 'some_test_value'"); @@ -1401,8 +1464,8 @@ public async Task Connect_OptionsFromEnvironment_Succeeds() { using (SetEnvironmentVariable("PGOPTIONS", "-c default_transaction_isolation=serializable -c default_transaction_deferrable=on -c foo.bar=My\\ Famous\\\\Thing")) { - using var _ = CreateTempPool(ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_isolation"), Is.EqualTo("serializable")); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_deferrable"), Is.EqualTo("on")); Assert.That(await conn.ExecuteScalarAsync("SHOW foo.bar"), Is.EqualTo("My Famous\\Thing")); @@ -1414,22 +1477,118 @@ public async Task 
Connect_OptionsFromEnvironment_Succeeds() [TestCase(false, TestName = "NoNoResetOnClose")] public async Task NoResetOnClose(bool noResetOnClose) { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) + var originalApplicationName = new NpgsqlConnectionStringBuilder(ConnectionString).ApplicationName ?? ""; + + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - NoResetOnClose = noResetOnClose - }; - using var _ = CreateTempPool(builder, out var connectionString); - var original = new NpgsqlConnectionStringBuilder(connectionString).ApplicationName; + csb.MaxPoolSize = 1; + csb.NoResetOnClose = noResetOnClose; + }); - using var conn = await OpenConnectionAsync(connectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET application_name = 'modified'"); await conn.CloseAsync(); await conn.OpenAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW application_name"), Is.EqualTo( - noResetOnClose || IsMultiplexing + noResetOnClose ? 
"modified" - : original)); + : originalApplicationName)); + } + + [Test] + [Description("Test whether the internal NpgsqlConnection.Open method stays on the same thread with async=false")] + public async Task Sync_open_blocked_same_thread() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxPoolSize = 1; + }); + + await using var openConnection = await dataSource.OpenConnectionAsync(); + + // 2 tasks are usually enough to reproduce the issue + const int taskCount = 2; + + var tcs = new TaskCompletionSource[taskCount]; + for (var i = 0; i < tcs.Length; i++) + { + tcs[i] = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); + } + var sameThreadTasks = Enumerable.Range(0, taskCount).Select(x => Task.Run(async () => + { + var beforeOpenThread = Thread.CurrentThread; + tcs[x].SetResult(null); + using var conn = dataSource.CreateConnection(); + // even though we await it should complete synchronously due to async = false + await conn.Open(async: false, CancellationToken.None); + return beforeOpenThread == Thread.CurrentThread; + })).ToList(); + + await Task.WhenAll(tcs.Select(x => x.Task)); + // Just in case give them a second to block on getting a connection from the pool + await Task.Delay(1000); + await openConnection.CloseAsync(); + + foreach (var sameThreadTask in sameThreadTasks) + { + Assert.That(await sameThreadTask, "Synchronous open completed on different thread"); + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6427")] + [Platform(Include = "Win")] // Hangs on linux and mac (probably because of missing kerberos token) + public async Task Gss_encryption_retry_does_not_clear_pool() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + GssEncryptionMode = GssEncryptionMode.Prefer, + NoResetOnClose = false + }; + // Break connection on gss encryption request to force the client to create a new connection and retry again + // This emulates the behavior of older versions of 
PostgreSQL or its forks, like Supabase + await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString, breakOnGssEncryptionRequest: true); + await using var dataSource = CreateDataSource(builder => + { + builder.ConnectionStringBuilder.ConnectionString = postmaster.ConnectionString; + // We use kerberos by default, which requires specific credentials to work + // Change it negotiate so SSPI on windows can use NTLM credentials + builder.UseNegotiateOptionsCallback(options => options.Package = "Negotiate"); + }); + + PgServerMock server; + + int processID; + await using (var conn = await dataSource.OpenConnectionAsync()) + { + processID = conn.ProcessID; + + // The next connection request isn't valid because it was retried + await postmaster.SkipNextConnection(); + + var queryTask = conn.ExecuteNonQueryAsync("SELECT 1"); + + server = await postmaster.WaitForServerConnection(); + await server.ExpectExtendedQuery(); + await server.WriteScalarResponseAndFlush(1); + await queryTask; + } + + // The second time we get a connection from the pool we should ge the exact same connection + await using (var conn = await dataSource.OpenConnectionAsync()) + { + Assert.That(conn.ProcessID, Is.EqualTo(processID)); + + var queryTask = conn.ExecuteNonQueryAsync("SELECT 1"); + + // We do not set NoResetOnClose=true on connection string to test query behavior after connection retry + await server.ExpectSimpleQuery("DISCARD ALL"); + await server.ExpectExtendedQuery(); + server + .WriteCommandComplete() + .WriteReadyForQuery(); + await server.WriteScalarResponseAndFlush(1); + await queryTask; + } } #region Physical connection initialization @@ -1485,11 +1644,6 @@ public async Task PhysicalConnectionInitializer_async() [Test] public async Task PhysicalConnectionInitializer_sync_with_break() { - if (IsMultiplexing) // Sync I/O - return; - - await using var adminConn = await OpenConnectionAsync(); - var dataSourceBuilder = CreateDataSourceBuilder(); 
dataSourceBuilder.UsePhysicalConnectionInitializer( conn => @@ -1510,8 +1664,6 @@ public async Task PhysicalConnectionInitializer_sync_with_break() [Test] public async Task PhysicalConnectionInitializer_async_with_break() { - await using var adminConn = await OpenConnectionAsync(); - var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( _ => throw new NotSupportedException(), @@ -1532,11 +1684,7 @@ public async Task PhysicalConnectionInitializer_async_with_break() [Test] public async Task PhysicalConnectionInitializer_async_throws_on_second_open() { - // With multiplexing a physical connection might open on NpgsqlConnection.OpenAsync (if there was no completed bootstrap beforehand) - // or on NpgsqlCommand.ExecuteReaderAsync. - // We've already tested the first case in PhysicalConnectionInitializer_async_throws above, testing the second one below. - await using var adminConn = await OpenConnectionAsync(); - + // We've already tested a simpler case in PhysicalConnectionInitializer_async_throws above, testing a second one below. 
var count = 0; var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( @@ -1549,9 +1697,13 @@ public async Task PhysicalConnectionInitializer_async_throws_on_second_open() }); await using var dataSource = dataSourceBuilder.Build(); - Assert.DoesNotThrowAsync(async () => await dataSource.OpenConnectionAsync()); + await using var conn1 = dataSource.CreateConnection(); + Assert.DoesNotThrowAsync(async () => await conn1.OpenAsync()); - var exception = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + await using var tx = await conn1.BeginTransactionAsync(); + + await using var conn2 = dataSource.CreateConnection(); + var exception = Assert.ThrowsAsync(async () => await conn2.OpenAsync())!; Assert.That(exception.Message, Is.EqualTo("INTENTIONAL FAILURE")); } @@ -1579,17 +1731,147 @@ public async Task PhysicalConnectionInitializer_disposes_connection() #endregion Physical connection initialization + #region Require auth + + [Test] + public async Task Connect_with_any_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"{RequireAuthMode.Password},{RequireAuthMode.MD5},{RequireAuthMode.GSS},{RequireAuthMode.SSPI},{RequireAuthMode.ScramSHA256},{RequireAuthMode.None}"; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Connect_with_any_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"{RequireAuthMode.Password},{RequireAuthMode.MD5},{RequireAuthMode.GSS},{RequireAuthMode.SSPI},{RequireAuthMode.ScramSHA256},{RequireAuthMode.None}"); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + public async Task Connect_with_any_except_none_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"!{RequireAuthMode.None}"; + }); + await 
using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Connect_with_any_except_none_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"!{RequireAuthMode.None}"); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + public async Task Fail_connect_with_none_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"{RequireAuthMode.None}"; + }); + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.Message, Does.Contain("authentication method is not allowed")); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Fail_connect_with_none_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"{RequireAuthMode.None}"); + await using var dataSource = CreateDataSource(); + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.Message, Does.Contain("authentication method is not allowed")); + } + + [Test] + public async Task Connect_with_md5_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"{RequireAuthMode.MD5}"; + }); + try + { + await using var conn = await dataSource.OpenConnectionAsync(); + } + catch (Exception e) when (!IsOnBuildServer) + { + Console.WriteLine(e); + Assert.Ignore("MD5 authentication doesn't seem to be set up"); + } + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Connect_with_md5_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"{RequireAuthMode.MD5}"); + await using var dataSource = CreateDataSource(); + try + { + await using var conn = await dataSource.OpenConnectionAsync(); + } + catch (Exception e) when (!IsOnBuildServer) + { + Console.WriteLine(e); + Assert.Ignore("MD5 authentication 
doesn't seem to be set up"); + } + } + + [Test] + public void Mixed_auth_methods_not_supported([Values( + $"{nameof(RequireAuthMode.ScramSHA256)},!{nameof(RequireAuthMode.None)}", + $"!{nameof(RequireAuthMode.ScramSHA256)},{nameof(RequireAuthMode.None)}")] + string authMethods) + { + var csb = new NpgsqlConnectionStringBuilder(); + Assert.Throws(() => csb.RequireAuth = authMethods); + } + + [Test] + public void Remove_all_auth_methods_throws() + { + var csb = new NpgsqlConnectionStringBuilder(); + Assert.Throws(() => + csb.RequireAuth = $"!{RequireAuthMode.Password},!{RequireAuthMode.MD5},!{RequireAuthMode.GSS},!{RequireAuthMode.SSPI},!{RequireAuthMode.ScramSHA256},!{RequireAuthMode.None}"); + } + + [Test] + public void Unknown_auth_method_throws() + { + var csb = new NpgsqlConnectionStringBuilder(); + Assert.Throws(() => csb.RequireAuth = "SuperSecure"); + } + + [Test] + public void Auth_methods_are_trimmed() + { + var csb = new NpgsqlConnectionStringBuilder + { + RequireAuth = $"{RequireAuthMode.Password} , {RequireAuthMode.MD5}" + }; + Assert.That(csb.RequireAuthModes, Is.EqualTo(RequireAuthMode.Password | RequireAuthMode.MD5)); + } + + #endregion Require auth + [Test] [NonParallelizable] // Modifies global database info factories [IssueLink("https://github.com/npgsql/npgsql/issues/4425")] public async Task Breaking_connection_while_loading_database_info() { - if (IsMultiplexing) - return; - - using var _ = CreateTempPool(ConnectionString, out var connString); + await using var dataSource = CreateDataSource(); - await using var firstConn = new NpgsqlConnection(connString); + await using var firstConn = dataSource.CreateConnection(); NpgsqlDatabaseInfo.RegisterFactory(new BreakingDatabaseInfoFactory()); try { @@ -1602,7 +1884,7 @@ public async Task Breaking_connection_while_loading_database_info() } await firstConn.OpenAsync(); - await using var secondConn = await OpenConnectionAsync(connString); + await using var secondConn = await 
dataSource.OpenConnectionAsync(); await secondConn.CloseAsync(); await firstConn.ReloadTypesAsync(); @@ -1673,12 +1955,9 @@ public async Task Log_Open_Close_pooled() AssertLoggingStateContains(closedConnectionEvent, "Port", port); AssertLoggingStateContains(closedConnectionEvent, "Database", database); - if (!IsMultiplexing) - { - AssertLoggingStateContains(openedConnectionEvent, "ConnectorId", processId); - AssertLoggingStateContains(closingConnectionEvent, "ConnectorId", processId); - AssertLoggingStateContains(closedConnectionEvent, "ConnectorId", processId); - } + AssertLoggingStateContains(openedConnectionEvent, "ConnectorId", processId); + AssertLoggingStateContains(closingConnectionEvent, "ConnectorId", processId); + AssertLoggingStateContains(closedConnectionEvent, "ConnectorId", processId); var ids = new[] { @@ -1695,9 +1974,6 @@ public async Task Log_Open_Close_pooled() [Test] public async Task Log_Open_Close_physical() { - if (IsMultiplexing) - return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { Pooling = false }; await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, csb.ToString()); await using var conn = dataSource.CreateConnection(); @@ -1749,6 +2025,4 @@ void AssertLoggingConnectionString(NpgsqlConnection connection, object? 
logState } #endregion Logging tests - - public ConnectionTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index adac0916f4..d3551517bc 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1,20 +1,23 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; +using System.Collections.Specialized; using System.Data; +using System.Diagnostics; using System.IO; using System.Numerics; using System.Text; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; +using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; -public class CopyTests : MultiplexingTestBase +public class CopyTests : TestBase { #region Issue 2257 @@ -64,10 +67,9 @@ public async Task Raw_binary_roundtrip([Values(false, true)] bool async) const int iterations = 500; var table = await GetTempTableName(conn); - + await conn.ExecuteNonQueryAsync($@"CREATE TABLE {table} (field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER)"); using (var tx = conn.BeginTransaction()) { - await conn.ExecuteNonQueryAsync($@"CREATE TABLE {table} (field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER)"); // Preload some data into the table using (var cmd = @@ -158,14 +160,15 @@ public async Task Cancel_raw_binary_import() using var conn = await OpenConnectionAsync(); var table = await GetTempTableName(conn); await conn.ExecuteNonQueryAsync($@"CREATE TABLE {table} (field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER)"); - - var garbage = new byte[] {1, 2, 3, 4}; - using (var s = conn.BeginRawBinaryCopy($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + await using (var tx = await conn.BeginTransactionAsync()) { - s.Write(garbage, 0, garbage.Length); - s.Cancel(); + var garbage = new byte[] {1, 2, 3, 4}; + using (var s = conn.BeginRawBinaryCopy($"COPY 
{table} (field_text, field_int4) FROM STDIN BINARY")) + { + s.Write(garbage, 0, garbage.Length); + s.Cancel(); + } } - Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } @@ -224,8 +227,6 @@ public async Task Wrong_table_definition_raw_binary_copy() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_raw_binary_copy() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using (var conn = await OpenConnectionAsync()) { var table = await CreateTempTable(conn, "blob BYTEA"); @@ -293,6 +294,7 @@ public async Task Binary_roundtrip([Values(false, true)] bool async) Assert.That(reader.StartRow(), Is.EqualTo(2)); Assert.That(reader.Read(), Is.EqualTo(longString)); Assert.That(reader.IsNull, Is.True); + Assert.That(reader.IsNull, Is.True); reader.Skip(); Assert.That(reader.StartRow(), Is.EqualTo(-1)); @@ -306,13 +308,15 @@ public async Task Cancel_binary_import() { using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER"); - - using (var writer = conn.BeginBinaryImport($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + await using (var tx = await conn.BeginTransactionAsync()) { - writer.StartRow(); - writer.Write("Hello"); - writer.Write(8); - // No commit should rollback + using (var writer = conn.BeginBinaryImport($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write("Hello"); + writer.Write(8); + // No commit should rollback + } } Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } @@ -356,9 +360,9 @@ public async Task Import_numeric() await using var cmd = conn.CreateCommand(); cmd.CommandText = $"SELECT field FROM {table}"; await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); 
Assert.That(reader.GetValue(0), Is.EqualTo(1234m)); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetValue(0), Is.EqualTo(5678m)); } @@ -380,6 +384,46 @@ public async Task Import_string_array() Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(data)); } + [Test] + public async Task Import_DBNull_then_other_object() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field TEXT"); + + object data = "foo"; + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write((object?)DBNull.Value); + writer.StartRow(); + writer.Write(data); + var rowsWritten = writer.Complete(); + Assert.That(rowsWritten, Is.EqualTo(2)); + } + + Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table} OFFSET 1"), Is.EqualTo(data)); + } + + [Test] + public async Task Import_reused_instance_mapping_info_identical_or_throws() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field int4"); + + var data = 8; + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write(data, NpgsqlDbType.Integer); + writer.StartRow(); + Assert.Throws(Is.TypeOf().With.Property("Message").StartsWith("Write for column 0 resolves to a different PostgreSQL type"), + () => writer.Write(data, "int2")); + // Should be recoverable by using the same type again. 
+ writer.Write(data, "int4"); + writer.Complete(); + } + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/816")] public async Task Import_string_with_buffer_length() { @@ -413,6 +457,46 @@ public async Task Import_direct_buffer() writer.Write(data); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5330")] + public async Task Import_object_null() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field TEXT[]"); + + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write(null, NpgsqlDbType.Boolean); + var rowsWritten = writer.Complete(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(DBNull.Value)); + } + + static readonly TestCaseData[] DBNullValues = + [ + new TestCaseData(DBNull.Value).SetName("DBNull.Value"), + new TestCaseData(null).SetName("null") + ]; + + [Test, TestCaseSource(nameof(DBNullValues))] + public async Task Import_dbnull(DBNull? 
value) + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field TEXT[]"); + + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write(value, NpgsqlDbType.Boolean); + var rowsWritten = writer.Complete(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(DBNull.Value)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_table_definition_binary_import() { @@ -426,8 +510,6 @@ public async Task Wrong_table_definition_binary_import() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_binary_import() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginBinaryImport($"COPY {table} (blob) FROM STDIN")); @@ -444,11 +526,134 @@ public async Task Wrong_table_definition_binary_export() Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5457")] + public async Task MixedOperations() + { + using var conn = await OpenConnectionAsync(); + + using var reader = conn.BeginBinaryExport(""" + COPY (values ('foo', 1), ('bar', null), (null, 2)) TO STDOUT BINARY + """); + while(reader.StartRow() != -1) + { + string? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + int? col2 = null; + if (reader.IsNull) + reader.Skip(); + else + col2 = reader.Read(); + } + } + + [Test] + public async Task ReadMoreColumnsThanExist() + { + using var conn = await OpenConnectionAsync(); + + using var reader = conn.BeginBinaryExport(""" + COPY (values ('foo', 1), ('bar', null), (null, 2)) TO STDOUT BINARY + """); + while(reader.StartRow() != -1) + { + string? 
col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + int? col2 = null; + if (reader.IsNull) + reader.Skip(); + else + col2 = reader.Read(); + + Assert.Throws(() => _ = reader.IsNull); + } + } + + [Test] + public async Task ReadZeroSizedColumns() + { + using var conn = await OpenConnectionAsync(); + + using var reader = conn.BeginBinaryExport(""" + COPY (values (1, '', ''), (2, null, ''), (3, '', null)) TO STDOUT BINARY + """); + while(reader.StartRow() != -1) + { + int? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + + string? col2 = null; + if (reader.IsNull) + reader.Skip(); + else + col2 = reader.Read(); + + string? col3 = null; + if (reader.IsNull) + reader.Skip(); + else + col3 = reader.Read(); + } + } + + [Test] + public async Task ReadTypeInfoProviderType() + { + using var conn = await OpenConnectionAsync(); + + using (var reader = conn.BeginBinaryExport(""" + COPY (values (NOW()), (NULL)) TO STDOUT BINARY + """)) + { + while (reader.StartRow() != -1) + { + DateTime? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + } + } + + using (var reader = conn.BeginBinaryExport(""" + COPY (values (NOW()), (NULL)) TO STDOUT BINARY + """)) + { + while (reader.StartRow() != -1) + { + DateTimeOffset? 
col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + } + } + } + + [Test] + public async Task StreamingRead() + { + using var conn = await OpenConnectionAsync(); + + var str = new string('a', PgReader.MaxPreparedTextReaderSize + 1); + var reader = conn.BeginBinaryExport($"""COPY (values ('{str}')) TO STDOUT BINARY"""); + while (reader.StartRow() != -1) + { + using var _ = reader.Read(NpgsqlDbType.Text); + } + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_binary_export() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginBinaryExport($"COPY {table} (blob) TO STDOUT")); @@ -456,12 +661,8 @@ public async Task Wrong_format_binary_export() } [Test, NonParallelizable, IssueLink("https://github.com/npgsql/npgsql/issues/661")] - [Ignore("Unreliable")] public async Task Unexpected_exception_binary_import() { - if (IsMultiplexing) - return; - // Use a private data source since we terminate the connection below (affects database state) await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); @@ -480,7 +681,7 @@ public async Task Unexpected_exception_binary_import() writer.StartRow(); writer.Write(data); writer.Dispose(); - }, Throws.Exception.TypeOf()); + }, Throws.Exception.InstanceOf()); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); } @@ -524,12 +725,19 @@ public async Task Export_long_string() using (var reader = conn.BeginBinaryExport($"COPY {table} (foo1, foo2, foo3, foo4, foo5) TO STDIN BINARY")) { - for (var row = 0; row < iterations; row++) + int row, col = 0; + for (row = 0; row < iterations; row++) { Assert.That(reader.StartRow(), Is.EqualTo(5)); - for (var col = 0; col < 5; col++) - Assert.That(reader.Read().Length, Is.EqualTo(len)); + for (col = 0; col < 5; col++) + 
{ + var str = reader.Read(); + Assert.That(str.Length, Is.EqualTo(len)); + Assert.That(str.AsSpan().IndexOfAnyExcept('x') is -1); + } } + Assert.That(row, Is.EqualTo(100)); + Assert.That(col, Is.EqualTo(5)); } } @@ -540,16 +748,18 @@ public async Task Read_bit_string() var table = await GetTempTableName(conn); await conn.ExecuteNonQueryAsync($@" -CREATE TABLE {table} (bits BIT(3), bitarray BIT(3)[]); -INSERT INTO {table} (bits, bitarray) VALUES (B'101', ARRAY[B'101', B'111'])"); +CREATE TABLE {table} (bits BIT(11), bitvector BIT(11), bitarray BIT(3)[]); +INSERT INTO {table} (bits, bitvector, bitarray) VALUES (B'00000001101', B'00000001101', ARRAY[B'101', B'111'])"); - using var reader = conn.BeginBinaryExport($"COPY {table} (bits, bitarray) TO STDIN BINARY"); + using var reader = conn.BeginBinaryExport($"COPY {table} (bits, bitvector, bitarray) TO STDIN BINARY"); reader.StartRow(); - Assert.That(reader.Read(), Is.EqualTo(new BitArray(new[] { true, false, true }))); + Assert.That(reader.Read(), Is.EqualTo(new BitArray([false, false, false, false, false, false, false, true, true, false, true + ]))); + Assert.That(reader.Read(), Is.EqualTo(new BitVector32(0b00000001101000000000000000000000))); Assert.That(reader.Read(), Is.EqualTo(new[] { - new BitArray(new[] { true, false, true }), - new BitArray(new[] { true, true, true }) + new BitArray([true, false, true]), + new BitArray([true, true, true]) })); } @@ -743,12 +953,15 @@ public async Task Write_column_out_of_bounds_throws() public async Task Cancel_raw_binary_export_when_not_consumed_and_then_Dispose() { await using var conn = await OpenConnectionAsync(); - // This must be large enough to cause Postgres to queue up CopyData messages. 
- var stream = conn.BeginRawBinaryCopy("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); - var buffer = new byte[32]; - await stream.ReadAsync(buffer, 0, buffer.Length); - stream.Cancel(); - Assert.DoesNotThrowAsync(async () => await stream.DisposeAsync()); + await using (var tx = await conn.BeginTransactionAsync()) + { + // This must be large enough to cause Postgres to queue up CopyData messages. + var stream = conn.BeginRawBinaryCopy("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); + var buffer = new byte[32]; + await stream.ReadExactlyAsync(buffer, 0, buffer.Length); + stream.Cancel(); + Assert.DoesNotThrowAsync(async () => await stream.DisposeAsync()); + } Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1), "The connection is still OK"); } @@ -756,26 +969,35 @@ public async Task Cancel_raw_binary_export_when_not_consumed_and_then_Dispose() public async Task Cancel_binary_export_when_not_consumed_and_then_Dispose() { await using var conn = await OpenConnectionAsync(); - // This must be large enough to cause Postgres to queue up CopyData messages. - var exporter = conn.BeginBinaryExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); - await exporter.StartRowAsync(); - await exporter.ReadAsync(); - exporter.Cancel(); - Assert.DoesNotThrowAsync(async () => await exporter.DisposeAsync()); + await using (var tx = await conn.BeginTransactionAsync()) + { + // This must be large enough to cause Postgres to queue up CopyData messages. 
+ var exporter = conn.BeginBinaryExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); + await exporter.StartRowAsync(); + await exporter.ReadAsync(); + exporter.Cancel(); + Assert.DoesNotThrowAsync(async () => await exporter.DisposeAsync()); + } Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1), "The connection is still OK"); } [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4417")] - public async Task Binary_copy_throws_for_nullable() + [IssueLink("https://github.com/npgsql/npgsql/issues/5110")] + public async Task Binary_copy_read_char_column() { await using var conn = await OpenConnectionAsync(); - var tableName = await CreateTempTable(conn, "house_number integer"); + var tableName = await CreateTempTable(conn, "id serial, value char"); + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = $"INSERT INTO {tableName}(value) VALUES ('d'), ('s')"; + await cmd.ExecuteNonQueryAsync(); - await using var writer = await conn.BeginBinaryImportAsync($"COPY {tableName}(house_number) FROM STDIN BINARY"); - int? 
value = 1; - await writer.StartRowAsync(); - Assert.ThrowsAsync(async () => await writer.WriteAsync(value, NpgsqlDbType.Integer)); + await using var export = await conn.BeginBinaryExportAsync($"COPY {tableName}(id, value) TO STDOUT (FORMAT BINARY)"); + while (await export.StartRowAsync() != -1) + { + var id = export.Read(); + var value = export.Read(); + } } #endregion @@ -816,10 +1038,12 @@ public async Task Cancel_text_import() { using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER"); - - var writer = (NpgsqlCopyTextWriter)conn.BeginTextImport($"COPY {table} (field_text, field_int4) FROM STDIN"); - writer.Write("HELLO\t1\n"); - writer.Cancel(); + await using (var tx = await conn.BeginTransactionAsync()) + { + var writer = (NpgsqlCopyTextWriter)conn.BeginTextImport($"COPY {table} (field_text, field_int4) FROM STDIN"); + writer.Write("HELLO\t1\n"); + writer.Cancel(); + } Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } @@ -879,8 +1103,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_table_definition_text_import() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); Assert.Throws(() => conn.BeginTextImport("COPY table_is_not_exist (blob) FROM STDIN")); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); @@ -890,8 +1112,6 @@ public async Task Wrong_table_definition_text_import() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_text_import() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginTextImport($"COPY {table} (blob) FROM STDIN BINARY")); @@ -901,8 +1121,6 @@ public async Task 
Wrong_format_text_import() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_table_definition_text_export() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); Assert.Throws(() => conn.BeginTextExport("COPY table_is_not_exist (blob) TO STDOUT")); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); @@ -912,8 +1130,6 @@ public async Task Wrong_table_definition_text_export() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_text_export() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginTextExport($"COPY {table} (blob) TO STDOUT BINARY")); @@ -924,12 +1140,15 @@ public async Task Wrong_format_text_export() public async Task Cancel_text_export_when_not_consumed_and_then_Dispose() { await using var conn = await OpenConnectionAsync(); - // This must be large enough to cause Postgres to queue up CopyData messages. - var reader = (NpgsqlCopyTextReader) conn.BeginTextExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT"); - var buffer = new char[32]; - await reader.ReadAsync(buffer, 0, buffer.Length); - reader.Cancel(); - Assert.DoesNotThrow(reader.Dispose); + await using (var tx = await conn.BeginTransactionAsync()) + { + // This must be large enough to cause Postgres to queue up CopyData messages. 
+ var reader = (NpgsqlCopyTextReader) conn.BeginTextExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT"); + var buffer = new char[32]; + await reader.ReadAsync(buffer, 0, buffer.Length); + reader.Cancel(); + Assert.DoesNotThrow(reader.Dispose); + } Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1), "The connection is still OK"); } @@ -1009,7 +1228,7 @@ public async Task Write_null_values() { writer.StartRow(); writer.Write(DBNull.Value, NpgsqlDbType.Integer); - writer.Write((string?)null, NpgsqlDbType.Uuid); + writer.Write(null, NpgsqlDbType.Uuid); writer.Write(DBNull.Value); writer.Write((string?)null); var rowsWritten = writer.Complete(); @@ -1034,7 +1253,7 @@ public async Task Write_different_types() { writer.StartRow(); writer.Write(3.0, NpgsqlDbType.Integer); - writer.Write((object)new[] { 1, 2, 3 }); + writer.Write(new[] { 1, 2, 3 }); writer.StartRow(); writer.Write(3, NpgsqlDbType.Integer); writer.Write((object)new List { 4, 5, 6 }); @@ -1044,7 +1263,7 @@ public async Task Write_different_types() Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(2)); } - [Test, Description("Tests nested binding scopes in multiplexing")] + [Test] public async Task Within_transaction() { using var conn = await OpenConnectionAsync(); @@ -1085,17 +1304,50 @@ public async Task Within_transaction() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4199")] - public async Task Copy_is_not_supported_in_regular_command_execution() + public async Task Copy_from_is_not_supported_in_regular_command_execution() { - // Run in a separate pool to protect other queries in multiplexing - // because we're going to break the connection on CopyInResponse - using var _ = CreateTempPool(ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + // Run in a separate pool because we're going to break the connection on CopyInResponse + 
await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) FROM stdin"), Throws.Exception.TypeOf()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4974")] + public async Task Copy_to_is_not_supported_in_regular_command_execution() + { + // Run in a separate pool because we're going to break the connection on CopyInResponse + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INT"); + + Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) TO stdin"), Throws.Exception.TypeOf()); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5209")] + [Platform(Exclude = "MacOsX", Reason = "Write might not throw an exception")] + public async Task RawBinaryCopy_write_nre([Values] bool async) + { + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + + var server = await postmasterMock.WaitForServerConnection(); + await server + .WriteCopyInResponse(isBinary: true) + .FlushAsync(); + + await using var stream = await conn.BeginRawBinaryCopyAsync("COPY SomeTable (field_text, field_int4) FROM STDIN"); + server.Close(); + var value = Encoding.UTF8.GetBytes(new string('a', conn.Settings.WriteBufferSize * 2)); + if (async) + Assert.ThrowsAsync(async () => await stream.WriteAsync(value)); + else + Assert.Throws(() => stream.Write(value)); + Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); + } + #endregion #region Utils @@ -1112,6 +1364,4 @@ void StateAssertions(NpgsqlConnection conn) } #endregion - - public CopyTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git 
a/test/Npgsql.Tests/DataAdapterTests.cs b/test/Npgsql.Tests/DataAdapterTests.cs index 4b413409d7..91de36e734 100644 --- a/test/Npgsql.Tests/DataAdapterTests.cs +++ b/test/Npgsql.Tests/DataAdapterTests.cs @@ -92,8 +92,8 @@ public async Task Insert_with_DataSet() var dr2 = new NpgsqlCommand($"SELECT field_int2, field_numeric, field_timestamp FROM {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2[0]); - Assert.AreEqual(7.3000000M, dr2[1]); + Assert.That(dr2[0], Is.EqualTo(4)); + Assert.That(dr2[1], Is.EqualTo(7.3000000M)); dr2.Close(); } @@ -137,11 +137,10 @@ public async Task DataAdapter_update_return_value() var ds2 = ds.GetChanges()!; var daupdate = da.Update(ds2); - Assert.AreEqual(2, daupdate); + Assert.That(daupdate, Is.EqualTo(2)); } [Test] - [Ignore("")] public async Task DataAdapter_update_return_value2() { using var conn = await OpenConnectionAsync(); @@ -158,15 +157,15 @@ public async Task DataAdapter_update_return_value2() da.Update(ds); //## change id from 1 to 2 - cmd.CommandText = $"update {table} set field_float4 = 0.8"; + cmd.CommandText = $"update {table} set field_numeric = 0.8"; cmd.ExecuteNonQuery(); //## change value to newvalue ds.Tables[0].Rows[0][1] = 0.7; //## update should fail, and make a DBConcurrencyException var count = da.Update(ds); - //## count is 1, even if the isn't updated in the database - Assert.AreEqual(0, count); + //## count is 1, even if the row isn't updated in the database + Assert.That(count, Is.EqualTo(1)); } [Test] @@ -180,16 +179,15 @@ public async Task Fill_with_empty_resultset() da.Fill(ds); - Assert.AreEqual(1, ds.Tables.Count); - Assert.AreEqual(4, ds.Tables[0].Columns.Count); - Assert.AreEqual("field_serial", ds.Tables[0].Columns[0].ColumnName); - Assert.AreEqual("field_int2", ds.Tables[0].Columns[1].ColumnName); - Assert.AreEqual("field_timestamp", ds.Tables[0].Columns[2].ColumnName); - Assert.AreEqual("field_numeric", ds.Tables[0].Columns[3].ColumnName); + Assert.That(ds.Tables.Count, 
Is.EqualTo(1)); + Assert.That(ds.Tables[0].Columns.Count, Is.EqualTo(4)); + Assert.That(ds.Tables[0].Columns[0].ColumnName, Is.EqualTo("field_serial")); + Assert.That(ds.Tables[0].Columns[1].ColumnName, Is.EqualTo("field_int2")); + Assert.That(ds.Tables[0].Columns[2].ColumnName, Is.EqualTo("field_timestamp")); + Assert.That(ds.Tables[0].Columns[3].ColumnName, Is.EqualTo("field_numeric")); } [Test] - [Ignore("")] public async Task Fill_add_with_key() { using var conn = await OpenConnectionAsync(); @@ -206,33 +204,33 @@ public async Task Fill_add_with_key() var field_timestamp = ds.Tables[0].Columns[2]; var field_numeric = ds.Tables[0].Columns[3]; - Assert.IsFalse(field_serial.AllowDBNull); - Assert.IsTrue(field_serial.AutoIncrement); - Assert.AreEqual("field_serial", field_serial.ColumnName); - Assert.AreEqual(typeof(int), field_serial.DataType); - Assert.AreEqual(0, field_serial.Ordinal); - Assert.IsTrue(field_serial.Unique); - - Assert.IsTrue(field_int2.AllowDBNull); - Assert.IsFalse(field_int2.AutoIncrement); - Assert.AreEqual("field_int2", field_int2.ColumnName); - Assert.AreEqual(typeof(short), field_int2.DataType); - Assert.AreEqual(1, field_int2.Ordinal); - Assert.IsFalse(field_int2.Unique); - - Assert.IsTrue(field_timestamp.AllowDBNull); - Assert.IsFalse(field_timestamp.AutoIncrement); - Assert.AreEqual("field_timestamp", field_timestamp.ColumnName); - Assert.AreEqual(typeof(DateTime), field_timestamp.DataType); - Assert.AreEqual(2, field_timestamp.Ordinal); - Assert.IsFalse(field_timestamp.Unique); - - Assert.IsTrue(field_numeric.AllowDBNull); - Assert.IsFalse(field_numeric.AutoIncrement); - Assert.AreEqual("field_numeric", field_numeric.ColumnName); - Assert.AreEqual(typeof(decimal), field_numeric.DataType); - Assert.AreEqual(3, field_numeric.Ordinal); - Assert.IsFalse(field_numeric.Unique); + Assert.That(field_serial.AllowDBNull, Is.False); + Assert.That(field_serial.AutoIncrement); + Assert.That(field_serial.ColumnName, Is.EqualTo("field_serial")); + 
Assert.That(field_serial.DataType, Is.EqualTo(typeof(int))); + Assert.That(field_serial.Ordinal, Is.EqualTo(0)); + Assert.That(field_serial.Unique, Is.False); + + Assert.That(field_int2.AllowDBNull); + Assert.That(field_int2.AutoIncrement, Is.False); + Assert.That(field_int2.ColumnName, Is.EqualTo("field_int2")); + Assert.That(field_int2.DataType, Is.EqualTo(typeof(short))); + Assert.That(field_int2.Ordinal, Is.EqualTo(1)); + Assert.That(field_int2.Unique, Is.False); + + Assert.That(field_timestamp.AllowDBNull); + Assert.That(field_timestamp.AutoIncrement, Is.False); + Assert.That(field_timestamp.ColumnName, Is.EqualTo("field_timestamp")); + Assert.That(field_timestamp.DataType, Is.EqualTo(typeof(DateTime))); + Assert.That(field_timestamp.Ordinal, Is.EqualTo(2)); + Assert.That(field_timestamp.Unique, Is.False); + + Assert.That(field_numeric.AllowDBNull); + Assert.That(field_numeric.AutoIncrement, Is.False); + Assert.That(field_numeric.ColumnName, Is.EqualTo("field_numeric")); + Assert.That(field_numeric.DataType, Is.EqualTo(typeof(decimal))); + Assert.That(field_numeric.Ordinal, Is.EqualTo(3)); + Assert.That(field_numeric.Unique, Is.False); } [Test] @@ -252,21 +250,21 @@ public async Task Fill_add_columns() var field_timestamp = ds.Tables[0].Columns[2]; var field_numeric = ds.Tables[0].Columns[3]; - Assert.AreEqual("field_serial", field_serial.ColumnName); - Assert.AreEqual(typeof(int), field_serial.DataType); - Assert.AreEqual(0, field_serial.Ordinal); + Assert.That(field_serial.ColumnName, Is.EqualTo("field_serial")); + Assert.That(field_serial.DataType, Is.EqualTo(typeof(int))); + Assert.That(field_serial.Ordinal, Is.EqualTo(0)); - Assert.AreEqual("field_int2", field_int2.ColumnName); - Assert.AreEqual(typeof(short), field_int2.DataType); - Assert.AreEqual(1, field_int2.Ordinal); + Assert.That(field_int2.ColumnName, Is.EqualTo("field_int2")); + Assert.That(field_int2.DataType, Is.EqualTo(typeof(short))); + Assert.That(field_int2.Ordinal, Is.EqualTo(1)); - 
Assert.AreEqual("field_timestamp", field_timestamp.ColumnName); - Assert.AreEqual(typeof(DateTime), field_timestamp.DataType); - Assert.AreEqual(2, field_timestamp.Ordinal); + Assert.That(field_timestamp.ColumnName, Is.EqualTo("field_timestamp")); + Assert.That(field_timestamp.DataType, Is.EqualTo(typeof(DateTime))); + Assert.That(field_timestamp.Ordinal, Is.EqualTo(2)); - Assert.AreEqual("field_numeric", field_numeric.ColumnName); - Assert.AreEqual(typeof(decimal), field_numeric.DataType); - Assert.AreEqual(3, field_numeric.Ordinal); + Assert.That(field_numeric.ColumnName, Is.EqualTo("field_numeric")); + Assert.That(field_numeric.DataType, Is.EqualTo(typeof(decimal))); + Assert.That(field_numeric.Ordinal, Is.EqualTo(3)); } [Test] @@ -302,9 +300,9 @@ public async Task Update_letting_null_field_falue() da.Fill(ds); var dt = ds.Tables[0]; - Assert.IsNotNull(dt); + Assert.That(dt, Is.Not.Null); - var dr = ds.Tables[0].Rows[ds.Tables[0].Rows.Count - 1]; + var dr = ds.Tables[0].Rows[^1]; dr["field_int2"] = 4; var ds2 = ds.GetChanges()!; @@ -314,7 +312,7 @@ public async Task Update_letting_null_field_falue() using var dr2 = new NpgsqlCommand($"SELECT field_int2 FROM {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2["field_int2"]); + Assert.That(dr2["field_int2"], Is.EqualTo(4)); } [Test] @@ -329,7 +327,6 @@ public async Task Fill_with_duplicate_column_name() } [Test] - [Ignore("")] public Task Update_with_DataSet() => DoUpdateWithDataSet(); public async Task DoUpdateWithDataSet() @@ -343,14 +340,14 @@ public async Task DoUpdateWithDataSet() var ds = new DataSet(); var da = new NpgsqlDataAdapter($"select * from {table}", conn); var cb = new NpgsqlCommandBuilder(da); - Assert.IsNotNull(cb); + Assert.That(cb, Is.Not.Null); da.Fill(ds); var dt = ds.Tables[0]; - Assert.IsNotNull(dt); + Assert.That(dt, Is.Not.Null); - var dr = ds.Tables[0].Rows[ds.Tables[0].Rows.Count - 1]; + var dr = ds.Tables[0].Rows[^1]; dr["field_int2"] = 4; @@ -361,11 +358,10 @@ public 
async Task DoUpdateWithDataSet() using var dr2 = new NpgsqlCommand($"select * from {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2["field_int2"]); + Assert.That(dr2["field_int2"], Is.EqualTo(4)); } [Test] - [Ignore("")] public async Task Insert_with_CommandBuilder_case_sensitive() { using var conn = await OpenConnectionAsync(); @@ -374,13 +370,13 @@ public async Task Insert_with_CommandBuilder_case_sensitive() var ds = new DataSet(); var da = new NpgsqlDataAdapter($"select * from {table}", conn); var builder = new NpgsqlCommandBuilder(da); - Assert.IsNotNull(builder); + Assert.That(builder, Is.Not.Null); da.Fill(ds); var dt = ds.Tables[0]; var dr = dt.NewRow(); - dr["Field_Case_Sensitive"] = 4; + dr["Field_int4"] = 4; dt.Rows.Add(dr); var ds2 = ds.GetChanges()!; @@ -390,7 +386,7 @@ public async Task Insert_with_CommandBuilder_case_sensitive() using var dr2 = new NpgsqlCommand($"select * from {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2[1]); + Assert.That(dr2["field_int4"], Is.EqualTo(4)); } [Test] @@ -449,12 +445,11 @@ public async Task DataAdapter_command_access() var da = new NpgsqlDataAdapter(); da.SelectCommand = command; System.Data.Common.DbDataAdapter common = da; - Assert.IsNotNull(common.SelectCommand); + Assert.That(common.SelectCommand, Is.Not.Null); } [Test, Description("Makes sure that the INSERT/UPDATE/DELETE commands are auto-populated on NpgsqlDataAdapter")] [IssueLink("https://github.com/npgsql/npgsql/issues/179")] - [Ignore("Somehow related to us using a temporary table???")] public async Task Auto_populate_adapter_commands() { using var conn = await OpenConnectionAsync(); @@ -494,7 +489,6 @@ public void Command_builder_quoting() [Test, Description("Makes sure a correct SQL string is built with GetUpdateCommand(true) using correct parameter names and placeholders")] [IssueLink("https://github.com/npgsql/npgsql/issues/397")] - [Ignore("Somehow related to us using a temporary table???")] public async 
Task Get_UpdateCommand() { using var conn = await OpenConnectionAsync(); @@ -532,13 +526,13 @@ public async Task Load_DataTable() dt.Load(dr); dr.Close(); - Assert.AreEqual(5, dt.Columns[0].MaxLength); - Assert.AreEqual(5, dt.Columns[1].MaxLength); + Assert.That(dt.Columns[0].MaxLength, Is.EqualTo(5)); + Assert.That(dt.Columns[1].MaxLength, Is.EqualTo(5)); } public Task SetupTempTable(NpgsqlConnection conn) => CreateTempTable(conn, @" -field_pk SERIAL PRIMARY KEY, +field_pk INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, field_serial SERIAL, field_int2 SMALLINT, field_int4 INTEGER, diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index adbed90c0d..c2ef7bc9cb 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -1,7 +1,11 @@ using System; using System.Data; +using System.Data.Common; +using System.Text.Json; +using System.Text.Json.Serialization; using System.Threading.Tasks; using NUnit.Framework; +using static Npgsql.Tests.TestUtil; // ReSharper disable MethodHasAsyncOverload @@ -10,7 +14,7 @@ namespace Npgsql.Tests; public class DataSourceTests : TestBase { [Test] - public async Task CreateConnection() + public new async Task CreateConnection() { await using var dataSource = NpgsqlDataSource.Create(ConnectionString); await using var connection = dataSource.CreateConnection(); @@ -70,9 +74,9 @@ public async Task ExecuteReader_on_connectionless_command([Values] bool async) await using var command = dataSource.CreateCommand(); command.CommandText = "SELECT 1"; - using (var reader = async ? await command.ExecuteReaderAsync() : command.ExecuteReader()) + await using (var reader = async ? await command.ExecuteReaderAsync() : command.ExecuteReader()) { - Assert.True(reader.Read()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); } @@ -121,16 +125,40 @@ public async Task ExecuteReader_on_connectionless_batch([Values] bool async) using (var reader = async ? 
await batch.ExecuteReaderAsync() : batch.ExecuteReader()) { - Assert.True(reader.Read()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); - Assert.True(reader.NextResult()); - Assert.True(reader.Read()); + Assert.That(reader.NextResult()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(2)); } Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 1, Busy: 0))); } + [Test] + public void Clear() + { + using var dataSource = NpgsqlDataSource.Create(ConnectionString); + var connection1 = dataSource.OpenConnection(); + var connection2 = dataSource.OpenConnection(); + connection1.Close(); + + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 2, Idle: 1, Busy: 1))); + + dataSource.Clear(); + + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 0, Busy: 1))); + + var connection3 = dataSource.OpenConnection(); + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 2, Idle: 0, Busy: 2))); + + connection2.Close(); + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 0, Busy: 1))); + + connection3.Close(); + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 1, Busy: 0))); + } + [Test] public void Dispose() { @@ -184,7 +212,7 @@ public void No_password_without_PersistSecurityInfo() [Test] public async Task Cannot_access_connection_transaction_on_data_source_command() { - await using var command = SharedDataSource.CreateCommand(); + await using var command = DataSource.CreateCommand(); Assert.That(() => command.Connection, Throws.Exception.TypeOf()); Assert.That(() => command.Connection = null, Throws.Exception.TypeOf()); @@ -198,7 +226,7 @@ public async Task Cannot_access_connection_transaction_on_data_source_command() [Test] public async Task Cannot_access_connection_transaction_on_data_source_batch() { - await using var batch = SharedDataSource.CreateBatch(); + await using var batch = DataSource.CreateBatch(); Assert.That(() => batch.Connection, 
Throws.Exception.TypeOf()); Assert.That(() => batch.Connection = null, Throws.Exception.TypeOf()); @@ -243,4 +271,158 @@ public async Task Cannot_get_connection_after_dispose_unpooled([Values] bool asy Assert.That(() => dataSource.OpenConnection(), Throws.Exception.TypeOf()); } } + + [Test] // #4752 + public async Task As_DbDataSource([Values] bool async) + { + await using DbDataSource dataSource = NpgsqlDataSource.Create(ConnectionString); + await using var connection = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + Assert.That(connection.State, Is.EqualTo(ConnectionState.Open)); + + await using var command = dataSource.CreateCommand("SELECT 1"); + + Assert.That(async + ? await command.ExecuteScalarAsync() + : command.ExecuteScalar(), Is.EqualTo(1)); + } + + [Test] + public async Task Executing_command_on_disposed_datasource() + { + DbDataSource dataSource = NpgsqlDataSource.Create(ConnectionString); + await using (var _ = await dataSource.OpenConnectionAsync()) {} + await dataSource.DisposeAsync(); + await using var command = dataSource.CreateCommand("SELECT 1"); + Assert.ThrowsAsync(command.ExecuteNonQueryAsync); + } + + [Test] + public async Task Connection_string_builder_settings_are_frozen_on_Build() + { + var builder = CreateDataSourceBuilder(); + builder.ConnectionStringBuilder.ApplicationName = "foo"; + await using var dataSource = builder.Build(); + + builder.ConnectionStringBuilder.ApplicationName = "bar"; + + await using var command = dataSource.CreateCommand("SHOW application_name"); + Assert.That(await command.ExecuteScalarAsync(), Is.EqualTo("foo")); + } + + class Test + { + public int Id { get; set; } + } + + [Test] + public async Task ConfigureJsonOptions_is_order_independent() + { + // Expect failure, no options + { + var builder = CreateDataSourceBuilder(); + builder.EnableDynamicJson(); + await using var dataSource = builder.Build(); + + await using var command = dataSource.CreateCommand("SELECT '{\"id\": 
1}'::json;"); + using var reader = await command.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(default(int))); + } + + // Expect success, ConfigureJsonOptions before EnableDynamicJson + { + var builder = CreateDataSourceBuilder(); + builder.ConfigureJsonOptions(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + builder.EnableDynamicJson(); + await using var dataSource = builder.Build(); + + await using var command = dataSource.CreateCommand("SELECT '{\"id\": 1}'::json;"); + using var reader = await command.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(1)); + } + + // Expect success, EnableDynamicJson before ConfigureJsonOptions + { + var builder = CreateDataSourceBuilder(); + builder.EnableDynamicJson(); + builder.ConfigureJsonOptions(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + await using var dataSource = builder.Build(); + + await using var command = dataSource.CreateCommand("SELECT '{\"id\": 1}'::json;"); + using var reader = await command.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(1)); + } + } + + [Test] + public async Task ReloadTypes([Values] bool async) + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(type); + await using var dataSource = dataSourceBuilder.Build(); + + await using var connection = await dataSource.OpenConnectionAsync(); + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + if (async) + await dataSource.ReloadTypesAsync(); + else + dataSource.ReloadTypes(); + + Assert.ThrowsAsync(async () => await connection.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + + // Close connection and reopen to make sure it picks up the new type and 
mapping from the data source + await connection.CloseAsync(); + await connection.OpenAsync(); + + Assert.DoesNotThrowAsync(async () => await connection.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + } + + [Test] + public async Task ReloadTypes_across_data_sources([Values] bool async) + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(type); + await using var dataSource1 = dataSourceBuilder.Build(); + await using var connection1 = await dataSource1.OpenConnectionAsync(); + + await using var dataSource2 = dataSourceBuilder.Build(); + await using var connection2 = await dataSource2.OpenConnectionAsync(); + + await connection1.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + if (async) + await dataSource1.ReloadTypesAsync(); + else + dataSource1.ReloadTypes(); + + Assert.ThrowsAsync(async () => await connection1.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + + // Close connection and reopen to check that the new type and mapping is not available in dataSource2 + await connection2.CloseAsync(); + await connection2.OpenAsync(); + + Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + + await dataSource2.ReloadTypesAsync(); + + // Close connection2 and reopen to make sure it picks up the new type and mapping from dataSource2 + await connection2.CloseAsync(); + await connection2.OpenAsync(); + + Assert.DoesNotThrowAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + } + + enum Mood { Sad, Ok, Happy } } diff --git a/test/Npgsql.Tests/DataTypeNameTests.cs b/test/Npgsql.Tests/DataTypeNameTests.cs new file mode 100644 index 0000000000..acd209060e --- /dev/null +++ b/test/Npgsql.Tests/DataTypeNameTests.cs @@ -0,0 
+1,86 @@ +using System; +using Npgsql.Internal.Postgres; +using NUnit.Framework; + +namespace Npgsql.Tests; + +public class DataTypeNameTests +{ + [Test] + public void MaxLengthDataTypeName() + { + var name = new string('a', DataTypeName.NAMEDATALEN); + var fullyQualifiedDataTypeName= $"public.{name}"; + Assert.DoesNotThrow(() => new DataTypeName(fullyQualifiedDataTypeName)); + Assert.That(fullyQualifiedDataTypeName, Is.EqualTo(new DataTypeName(fullyQualifiedDataTypeName).Value)); + } + + [Test] + public void TooLongDataTypeName() + { + var name = new string('a', DataTypeName.NAMEDATALEN + 1); + var fullyQualifiedDataTypeName= $"public.{name}"; + var exception = Assert.Throws(() => new DataTypeName(fullyQualifiedDataTypeName)); + Assert.That(exception!.Message, Does.EndWith($": public.{new string('a', DataTypeName.NAMEDATALEN)}")); + } + + [TestCase("public.name", ExpectedResult = "public._name")] + [TestCase("public._name", ExpectedResult = "public._name")] + [TestCase("public.zzzaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa123", ExpectedResult = "public._zzzaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa12")] + public string ToArrayName(string name) + => new DataTypeName(name).ToArrayName(); + + [TestCase("public.multirange", ExpectedResult = "public.multirange")] + [TestCase("public.abcmultirange123", ExpectedResult = "public.abcmultirange123")] + [TestCase("public.multiRANGE", ExpectedResult = "public.multiRANGE_multirange")] + public string ToDefaultMultirangeNameHasMultiRange(string name) + => new DataTypeName(name).ToDefaultMultirangeName(); + + [TestCase("public.range", ExpectedResult = "public.multirange")] + [TestCase("public.abcrange123", ExpectedResult = "public.abcmultirange123")] + [TestCase("public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarange", ExpectedResult = "public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaamultirange")] // Replace goes to max length + 
[TestCase("public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarange1", ExpectedResult = "public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaamultir")] // Replace goes over max length + [TestCase("public.RANGE", ExpectedResult = "public.RANGE_multirange")] + public string ToDefaultMultirangeNameHasRange(string name) + => new DataTypeName(name).ToDefaultMultirangeName(); + + [TestCase("public.name", null, ExpectedResult = "public.name")] + [TestCase("public._name", null, ExpectedResult = "public._name")] + [TestCase("public.name[]", null, ExpectedResult = "public._name")] + [TestCase("public.integer", null, ExpectedResult = "public.integer")] + [TestCase("name", null, ExpectedResult = "pg_catalog.name")] + [TestCase("_name", null, ExpectedResult = "pg_catalog._name")] + [TestCase("name[]", null, ExpectedResult = "pg_catalog._name")] + [TestCase("mytype", null, ExpectedResult = "-.mytype")] + [TestCase("_mytype", null, ExpectedResult = "-._mytype")] + [TestCase("mytype[]", null, ExpectedResult = "-._mytype")] + [TestCase("character varying", null, ExpectedResult = "pg_catalog.varchar")] + [TestCase("decimal(facet_name)", null, ExpectedResult = "pg_catalog.numeric")] + [TestCase("name", "public", ExpectedResult = "public.name")] + [TestCase("name ", "public", ExpectedResult = "public.name")] + [TestCase("_name", "public", ExpectedResult = "public._name")] + [TestCase("name[]", "public", ExpectedResult = "public._name")] + [TestCase("timestamp with time zone", "public", ExpectedResult = "public.timestamp with time zone")] + [TestCase("timestamp with time zone", "pg_catalog", ExpectedResult = "pg_catalog.timestamptz")] + [TestCase("timestamp with time zone", null, ExpectedResult = "pg_catalog.timestamptz")] + [TestCase("boolean(facet_name)", "public", ExpectedResult = "public.boolean(facet_name)")] + [TestCase("boolean(facet_name)", "pg_catalog", ExpectedResult = "pg_catalog.bool")] + [TestCase("boolean(facet_name)", null, ExpectedResult = 
"pg_catalog.bool")] + [TestCase(" public.name ", null, ExpectedResult = "public.name")] + [TestCase("decimal", "public", ExpectedResult = "public.decimal")] + [TestCase("numeric", "public", ExpectedResult = "public.numeric")] + public string FromDisplayName(string name, string? schema) + => DataTypeName.FromDisplayName(schema is null or "pg_catalog" ? name : schema + "." + name).Value; + + [TestCase("pg_catalog.bool", ExpectedResult = "boolean")] + [TestCase("public.bool", ExpectedResult = "bool")] + [TestCase("pg_catalog.numeric", ExpectedResult = "numeric")] + [TestCase("pg_catalog._numeric", ExpectedResult = "numeric[]")] + [TestCase("pg_catalog.decimal", ExpectedResult = "numeric")] + [TestCase("public.numeric", ExpectedResult = "numeric")] + [TestCase("public._numeric", ExpectedResult = "numeric[]")] + [TestCase("public.decimal", ExpectedResult = "decimal")] + [TestCase("public._decimal", ExpectedResult = "decimal[]")] + public string UnqualifiedDisplayName(string fullyQualifiedName) + => new DataTypeName(fullyQualifiedName).UnqualifiedDisplayName; +} diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index 856861fa3a..aab4447ff2 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -1,5 +1,3 @@ -#if NET7_0_OR_GREATER - using System; using System.Collections.Concurrent; using System.Collections.Generic; @@ -7,7 +5,6 @@ using System.Text; using System.Threading; using System.Transactions; -using Npgsql.Internal; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -22,9 +19,11 @@ public void Two_connections_rollback_implicit_enlistment() using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOnDataSource; + using (new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOn)) + 
using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) { conn1.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test1')"); conn2.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test2')"); @@ -44,8 +43,10 @@ public void Two_connections_rollback_explicit_enlistment() using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); - using (var conn1 = OpenConnection(ConnectionStringEnlistOff)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOff)) + var dataSource = EnlistOffDataSource; + + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) using (new TransactionScope()) { conn1.EnlistTransaction(Transaction.Current); @@ -69,9 +70,11 @@ public void Two_connections_commit() using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOnDataSource; + using (var scope = new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) { conn1.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test1')"); conn2.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test2')"); @@ -91,7 +94,7 @@ public void Two_connections_commit() public void Two_connections_with_failure() { // Use our own data source since this test breaks the connection with a critical failure, affecting database state tracking. 
- using var dataSource = NpgsqlDataSource.Create(ConnectionStringEnlistOn); + using var dataSource = CreateDataSource(csb => csb.Enlist = true); using var adminConn = dataSource.OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); @@ -111,37 +114,6 @@ public void Two_connections_with_failure() AssertNumberOfRows(adminConn, table, 0); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1737")] - public void Multiple_unpooled_connections_do_not_reuse() - { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Pooling = false, - Enlist = true - }; - - using var scope = new TransactionScope(); - - int processId; - - using (var conn1 = OpenConnection(csb)) - using (var cmd = new NpgsqlCommand("SELECT 1", conn1)) - { - processId = conn1.ProcessID; - cmd.ExecuteNonQuery(); - } - - using (var conn2 = OpenConnection(csb)) - using (var cmd = new NpgsqlCommand("SELECT 1", conn2)) - { - // The connection reuse optimization isn't implemented for unpooled connections (though it could be) - Assert.That(conn2.ProcessID, Is.Not.EqualTo(processId)); - cmd.ExecuteNonQuery(); - } - - scope.Complete(); - } - [Test(Description = "Transaction race, bool distributed")] [Explicit("Fails on Appveyor (https://ci.appveyor.com/project/roji/npgsql/build/3.3.0-250)")] public void Transaction_race([Values(false, true)] bool distributed) @@ -149,13 +121,15 @@ public void Transaction_race([Values(false, true)] bool distributed) using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOnDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { using (var tx = new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) { eventQueue.Enqueue(new TransactionEvent("Scope started, connection enlisted")); conn1.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test1')"); @@ -205,12 +179,12 
@@ public void Transaction_race([Values(false, true)] bool distributed) } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. + Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } @@ -221,12 +195,14 @@ public void Connection_reuse_race_after_transaction([Values(false, true)] bool d using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOffDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { - using var conn1 = OpenConnection(ConnectionStringEnlistOff); + using var conn1 = dataSource.OpenConnection(); using (var scope = new TransactionScope()) { @@ -257,12 +233,12 @@ public void Connection_reuse_race_after_transaction([Values(false, true)] bool d } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. + Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } @@ -273,12 +249,14 @@ public void Connection_reuse_race_after_rollback([Values(false, true)] bool dist using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOffDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { - using var conn1 = OpenConnection(ConnectionStringEnlistOff); + using var conn1 = dataSource.OpenConnection(); using (new TransactionScope()) { @@ -309,12 +287,12 @@ public void Connection_reuse_race_after_rollback([Values(false, true)] bool dist } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. 
+ Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } @@ -326,12 +304,14 @@ public void Connection_reuse_race_chaining_transaction([Values(false, true)] boo using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOffDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { - using var conn1 = OpenConnection(ConnectionStringEnlistOff); + using var conn1 = dataSource.OpenConnection(); using (var scope = new TransactionScope()) { @@ -385,16 +365,33 @@ public void Connection_reuse_race_chaining_transaction([Values(false, true)] boo } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. + Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5246")] + public void Transaction_complete_with_undisposed_connections() + { + using var deleteOuter = new TransactionScope(); + using (var delImidiate = new TransactionScope(TransactionScopeOption.RequiresNew)) + { + var deleteNow = EnlistOnDataSource.OpenConnection(); + deleteNow.ExecuteNonQuery("SELECT 'del_now'"); + var deleteNow2 = EnlistOnDataSource.OpenConnection(); + deleteNow2.ExecuteNonQuery("SELECT 'del_now2'"); + delImidiate.Complete(); + } + var deleteConn = EnlistOnDataSource.OpenConnection(); + deleteConn.ExecuteNonQuery("SELECT 'delete, this should commit last'"); + deleteOuter.Complete(); + } + #region Utilities // MSDTC is asynchronous, i.e. 
Commit/Rollback may return before the transaction has actually completed in the database; @@ -427,7 +424,8 @@ void AssertNoPreparedTransactions() int GetNumberOfPreparedTransactions() { - using (var conn = OpenConnection(ConnectionStringEnlistOff)) + var dataSource = EnlistOffDataSource; + using (var conn = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT COUNT(*) FROM pg_prepared_xacts WHERE database = @database", conn)) { cmd.Parameters.Add(new NpgsqlParameter("database", conn.Database)); @@ -444,11 +442,9 @@ static void AssertNoDistributedIdentifier() static void AssertHasDistributedIdentifier() => Assert.That(Transaction.Current?.TransactionInformation.DistributedIdentifier ?? Guid.Empty, Is.Not.EqualTo(Guid.Empty), "Distributed identifier not found"); - public string ConnectionStringEnlistOn - => new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = true }.ToString(); + NpgsqlDataSource EnlistOnDataSource { get; set; } = default!; - public string ConnectionStringEnlistOff - => new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = false }.ToString(); + NpgsqlDataSource EnlistOffDataSource { get; set; } = default!; static string FormatEventQueue(ConcurrentQueue eventQueue) { @@ -566,11 +562,9 @@ void Current_TransactionCompleted(object sender, TransactionEventArgs e) } } - public class TransactionEvent + public class TransactionEvent(string message) { - public TransactionEvent(string message) - => Message = $"{message} (TId {Thread.CurrentThread.ManagedThreadId})"; - public string Message { get; } + public string Message { get; } = $"{message} (TId {Thread.CurrentThread.ManagedThreadId})"; } #endregion Utilities @@ -606,6 +600,18 @@ public void OneTimeSetUp() } foreach (var xactGid in lingeringTransactions) connection.ExecuteNonQuery($"ROLLBACK PREPARED '{xactGid}'"); + + EnlistOnDataSource = CreateDataSource(csb => csb.Enlist = true); + EnlistOffDataSource = CreateDataSource(csb => csb.Enlist = false); + } + + 
[OneTimeTearDown] + public void OnTimeTearDown() + { + EnlistOnDataSource?.Dispose(); + EnlistOnDataSource = null!; + EnlistOffDataSource?.Dispose(); + EnlistOffDataSource = null!; } [SetUp] @@ -625,5 +631,3 @@ internal static string CreateTempTable(NpgsqlConnection conn, string columns) #endregion } - -#endif diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index 4c35b5bf66..ec7e7f18db 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -16,16 +16,15 @@ public class ExceptionTests : TestBase [Test, Description("Generates a basic server-side exception, checks that it's properly raised and populated")] public void Basic() { - using var conn = OpenConnection(new NpgsqlConnectionStringBuilder(ConnectionString) - { - // Make sure messages are in English - Options = "-c lc_messages=en_US.UTF-8" - }); - conn.ExecuteNonQuery(@" - CREATE OR REPLACE FUNCTION pg_temp.emit_exception() RETURNS VOID AS - 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' - LANGUAGE 'plpgsql'; - "); + // Make sure messages are in English + using var dataSource = CreateDataSource(csb => csb.Options = "-c lc_messages=en_US.UTF-8"); + using var conn = dataSource.OpenConnection(); + conn.ExecuteNonQuery( +""" +CREATE OR REPLACE FUNCTION pg_temp.emit_exception() RETURNS VOID AS + 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' +LANGUAGE 'plpgsql'; +"""); PostgresException ex = null!; try @@ -93,9 +92,8 @@ await conn.ExecuteNonQueryAsync($@" [Test] public async Task IncludeErrorDetail() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { IncludeErrorDetail = true }; - using var _ = CreateTempPool(builder, out var connectionStringWithDetails); - await using var conn = await OpenConnectionAsync(connectionStringWithDetails); + await using var dataSource = CreateDataSource(csb => csb.IncludeErrorDetail = 
true); + await using var conn = await dataSource.OpenConnectionAsync(); var raiseExceptionFunc = await GetTempFunctionName(conn); var raiseNoticeFunc = await GetTempFunctionName(conn); @@ -205,102 +203,10 @@ public void NpgsqlException_with_async() [Test] public void NpgsqlException_IsTransient() { - Assert.True(new NpgsqlException("", new IOException()).IsTransient); - Assert.True(new NpgsqlException("", new SocketException()).IsTransient); - Assert.True(new NpgsqlException("", new TimeoutException()).IsTransient); - Assert.False(new NpgsqlException().IsTransient); - Assert.False(new NpgsqlException("", new Exception("Inner Exception")).IsTransient); - } - - [Test] - public void PostgresException_IsTransient() - { - Assert.True(CreateWithSqlState("53300").IsTransient); - Assert.False(CreateWithSqlState("0").IsTransient); - - PostgresException CreateWithSqlState(string sqlState) - { - var info = CreateSerializationInfo(); - new Exception().GetObjectData(info, default); - - info.AddValue(nameof(PostgresException.Severity), null); - info.AddValue(nameof(PostgresException.InvariantSeverity), null); - info.AddValue(nameof(PostgresException.SqlState), sqlState); - info.AddValue(nameof(PostgresException.MessageText), null); - info.AddValue(nameof(PostgresException.Detail), null); - info.AddValue(nameof(PostgresException.Hint), null); - info.AddValue(nameof(PostgresException.Position), 0); - info.AddValue(nameof(PostgresException.InternalPosition), 0); - info.AddValue(nameof(PostgresException.InternalQuery), null); - info.AddValue(nameof(PostgresException.Where), null); - info.AddValue(nameof(PostgresException.SchemaName), null); - info.AddValue(nameof(PostgresException.TableName), null); - info.AddValue(nameof(PostgresException.ColumnName), null); - info.AddValue(nameof(PostgresException.DataTypeName), null); - info.AddValue(nameof(PostgresException.ConstraintName), null); - info.AddValue(nameof(PostgresException.File), null); - 
info.AddValue(nameof(PostgresException.Line), null); - info.AddValue(nameof(PostgresException.Routine), null); - - return new PostgresException(info, default); - } - } - -#pragma warning disable SYSLIB0011 -#pragma warning disable 618 - [Test] - public void Serialization() - { - var actual = new PostgresException("message text", "high", "high2", "53300", "detail", "hint", 18, 42, "internal query", - "where", "schema", "table", "column", "data type", "constraint", "file", "line", "routine"); - - var formatter = new BinaryFormatter(); - var stream = new MemoryStream(); - - formatter.Serialize(stream, actual); - stream.Seek(0, SeekOrigin.Begin); - - var expected = (PostgresException)formatter.Deserialize(stream); - - Assert.That(expected.Severity, Is.EqualTo(actual.Severity)); - Assert.That(expected.InvariantSeverity, Is.EqualTo(actual.InvariantSeverity)); - Assert.That(expected.SqlState, Is.EqualTo(actual.SqlState)); - Assert.That(expected.MessageText, Is.EqualTo(actual.MessageText)); - Assert.That(expected.Detail, Is.EqualTo(actual.Detail)); - Assert.That(expected.Hint, Is.EqualTo(actual.Hint)); - Assert.That(expected.Position, Is.EqualTo(actual.Position)); - Assert.That(expected.InternalPosition, Is.EqualTo(actual.InternalPosition)); - Assert.That(expected.InternalQuery, Is.EqualTo(actual.InternalQuery)); - Assert.That(expected.Where, Is.EqualTo(actual.Where)); - Assert.That(expected.SchemaName, Is.EqualTo(actual.SchemaName)); - Assert.That(expected.TableName, Is.EqualTo(actual.TableName)); - Assert.That(expected.ColumnName, Is.EqualTo(actual.ColumnName)); - Assert.That(expected.DataTypeName, Is.EqualTo(actual.DataTypeName)); - Assert.That(expected.ConstraintName, Is.EqualTo(actual.ConstraintName)); - Assert.That(expected.File, Is.EqualTo(actual.File)); - Assert.That(expected.Line, Is.EqualTo(actual.Line)); - Assert.That(expected.Routine, Is.EqualTo(actual.Routine)); - } - - SerializationInfo CreateSerializationInfo() => new(typeof(PostgresException), new 
FormatterConverter()); -#pragma warning restore 618 -#pragma warning restore SYSLIB0011 - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3204")] - public void Base_exception_property_serialization() - { - var ex = new PostgresException("the message", "low", "low2", "XX123"); - - var info = CreateSerializationInfo(); - ex.GetObjectData(info, default); - - // Check virtual base properties, which can be incorrectly deserialized if overridden, because the base - // Exception.GetObjectData() method writes the fields, not the properties (e.g. "_message" instead of "Message"). - Assert.That(ex.Data, Is.EquivalentTo((IDictionary?)info.GetValue("Data", typeof(IDictionary)))); - Assert.That(ex.HelpLink, Is.EqualTo(info.GetValue("HelpURL", typeof(string)))); - Assert.That(ex.Message, Is.EqualTo(info.GetValue("Message", typeof(string)))); - Assert.That(ex.Source, Is.EqualTo(info.GetValue("Source", typeof(string)))); - Assert.That(ex.StackTrace, Is.EqualTo(info.GetValue("StackTraceString", typeof(string)))); + Assert.That(new NpgsqlException("", new IOException()).IsTransient); + Assert.That(new NpgsqlException("", new SocketException()).IsTransient); + Assert.That(new NpgsqlException("", new TimeoutException()).IsTransient); + Assert.That(new NpgsqlException().IsTransient, Is.False); + Assert.That(new NpgsqlException("", new Exception("Inner Exception")).IsTransient, Is.False); } } diff --git a/test/Npgsql.Tests/FunctionTests.cs b/test/Npgsql.Tests/FunctionTests.cs index 6ca3c2db6d..e755ef746a 100644 --- a/test/Npgsql.Tests/FunctionTests.cs +++ b/test/Npgsql.Tests/FunctionTests.cs @@ -1,10 +1,9 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using Npgsql.PostgresTypes; using NpgsqlTypes; using NUnit.Framework; -using static Npgsql.Util.Statics; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; @@ -108,12 +107,12 @@ public async Task Named_parameters() command.Parameters.AddWithValue("sec", 4); var dt = 
(DateTime)(await command.ExecuteScalarAsync())!; - Assert.AreEqual(new DateTime(2015, 8, 1, 2, 3, 4), dt); + Assert.That(dt, Is.EqualTo(new DateTime(2015, 8, 1, 2, 3, 4))); command.Parameters[0].Value = 2014; command.Parameters[0].ParameterName = ""; // 2014 will be sent as a positional parameter dt = (DateTime)(await command.ExecuteScalarAsync())!; - Assert.AreEqual(new DateTime(2014, 8, 1, 2, 3, 4), dt); + Assert.That(dt, Is.EqualTo(new DateTime(2014, 8, 1, 2, 3, 4))); } [Test] @@ -144,6 +143,25 @@ public async Task Too_many_output_params() Assert.That(command.Parameters["c"].Value, Is.EqualTo(-1)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5793")] + public async Task ReturnValue_parameter_ignored() + { + await using var conn = await OpenConnectionAsync(); + var funcName = await GetTempFunctionName(conn); + await conn.ExecuteNonQueryAsync(@$"CREATE FUNCTION {funcName}() RETURNS integer AS 'SELECT 8;' LANGUAGE 'sql'"); + await using var cmd = new NpgsqlCommand(funcName, conn) { CommandType = CommandType.StoredProcedure }; + var param = new NpgsqlParameter + { + ParameterName = "@ReturnValue", + NpgsqlDbType = NpgsqlDbType.Integer, + Direction = ParameterDirection.ReturnValue, + Value = 0 + }; + cmd.Parameters.Add(param); + Assert.That(cmd.ExecuteScalar(), Is.EqualTo(8)); + Assert.That(param.Value, Is.EqualTo(0)); + } + [Test] public async Task CommandBehavior_SchemaOnly_support_function_call() { @@ -156,7 +174,33 @@ public async Task CommandBehavior_SchemaOnly_support_function_call() var i = 0; while (dr.Read()) i++; - Assert.AreEqual(0, i); + Assert.That(i, Is.EqualTo(0)); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5820")] + public async Task Output_param_cast_error() + { + await using var conn = await OpenConnectionAsync(); + var function = await GetTempFunctionName(conn); + await conn.ExecuteNonQueryAsync(@$" +CREATE FUNCTION {function} (INOUT param_in int4, OUT param_out interval) AS $$ +BEGIN + param_out = interval 
'5 years'; +END +$$ LANGUAGE plpgsql"); + await using var cmd = new NpgsqlCommand(function, conn); + cmd.CommandType = CommandType.StoredProcedure; + cmd.Parameters.Add(new NpgsqlParameter("param_in", DbType.Int32) + { + Direction = ParameterDirection.InputOutput, + Value = 1 + }); + cmd.Parameters.Add(new NpgsqlParameter("param_out", NpgsqlDbType.Interval) + { + Direction = ParameterDirection.Output + }); + Assert.ThrowsAsync(cmd.ExecuteNonQueryAsync); + Assert.DoesNotThrowAsync(async () => await conn.ExecuteNonQueryAsync("SELECT 1")); } #region DeriveParameters @@ -246,8 +290,8 @@ await conn.ExecuteNonQueryAsync( { await using var command = new NpgsqlCommand(@"""FunctionCaseSensitive""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -266,8 +310,8 @@ public async Task DeriveParameters_quote_characters_in_function_name() { await using var command = new NpgsqlCommand(function, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -286,8 +330,8 @@ await conn.ExecuteNonQueryAsync( { await using var command = new NpgsqlCommand(@"""My.Dotted.Function""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, 
command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -305,8 +349,8 @@ await conn.ExecuteNonQueryAsync( $"CREATE FUNCTION {function}(x int, y int, out sum int, out product int) AS 'SELECT $1 + $2, $1 * $2' LANGUAGE sql"); await using var command = new NpgsqlCommand(function, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual("x", command.Parameters[0].ParameterName); - Assert.AreEqual("y", command.Parameters[1].ParameterName); + Assert.That(command.Parameters[0].ParameterName, Is.EqualTo("x")); + Assert.That(command.Parameters[1].ParameterName, Is.EqualTo("y")); } [Test] diff --git a/test/Npgsql.Tests/GlobalTypeMapperTests.cs b/test/Npgsql.Tests/GlobalTypeMapperTests.cs new file mode 100644 index 0000000000..51f950045e --- /dev/null +++ b/test/Npgsql.Tests/GlobalTypeMapperTests.cs @@ -0,0 +1,143 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NUnit.Framework; +using static Npgsql.Tests.TestUtil; + +namespace Npgsql.Tests; + +#pragma warning disable CS0618 // GlobalTypeMapper is obsolete + +[NonParallelizable] +public class GlobalTypeMapperTests : TestBase +{ + [Test] + public async Task MapEnum() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + NpgsqlConnection.GlobalTypeMapper.MapEnum(type); + + await using var dataSource1 = CreateDataSource(); + + await using (var connection = await dataSource1.OpenConnectionAsync()) + { + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + await AssertType(connection, Mood.Happy, "happy", 
type, dataTypeInference: DataTypeInference.Nothing); + } + + NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); + + // Global mapping changes have no effect on already-built data sources + await AssertType(dataSource1, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource1, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); + + // But they do affect new data sources + await using var dataSource2 = CreateDataSource(); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", + type, dataTypeInference: DataTypeInference.Nothing)); + await AssertType(dataSource2, "happy", "happy", "text", dbType: DbType.String); + } + + [Test] + public async Task MapEnum_NonGeneric() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + NpgsqlConnection.GlobalTypeMapper.MapEnum(typeof(Mood), type); + + try + { + await using var dataSource1 = CreateDataSource(); + + await using (var connection = await dataSource1.OpenConnectionAsync()) + { + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + await AssertType(connection, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + } + + NpgsqlConnection.GlobalTypeMapper.UnmapEnum(typeof(Mood), type); + + // Global mapping changes have no effect on already-built data sources + await AssertType(dataSource1, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource1, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); + + // But they do affect new data sources + await using var dataSource2 = CreateDataSource(); + Assert.ThrowsAsync(() => AssertType(dataSource2, 
Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); + await AssertType(dataSource2, "happy", "happy", "text", dbType: DbType.String); + } + finally + { + NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); + } + } + + [Test] + public async Task Reset() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + NpgsqlConnection.GlobalTypeMapper.MapEnum(type); + + await using var dataSource1 = CreateDataSource(); + + await using (var connection = await dataSource1.OpenConnectionAsync()) + { + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + await AssertType(connection, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + } + + // A global mapping change has no effects on data sources which have already been built + NpgsqlConnection.GlobalTypeMapper.Reset(); + + // Global mapping changes have no effect on already-built data sources + await AssertType(dataSource1, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource1, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); + + // But they do affect new data sources + await using var dataSource2 = CreateDataSource(); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); + await AssertType(dataSource2, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); + } + + [Test] + public void Reset_and_add_resolver() + { + NpgsqlConnection.GlobalTypeMapper.Reset(); + NpgsqlConnection.GlobalTypeMapper.AddTypeInfoResolverFactory(new DummyResolverFactory()); + } + + [TearDown] + public void Teardown() + => NpgsqlConnection.GlobalTypeMapper.Reset(); + + enum 
Mood { Sad, Ok, Happy } + + class DummyResolverFactory : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new DummyResolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + class DummyResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => null; + } + } +} diff --git a/test/Npgsql.Tests/LargeObjectTests.cs b/test/Npgsql.Tests/LargeObjectTests.cs deleted file mode 100644 index 0471ab66ff..0000000000 --- a/test/Npgsql.Tests/LargeObjectTests.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System.Linq; -using System.Text; -using NUnit.Framework; - -namespace Npgsql.Tests; - -public class LargeObjectTests : TestBase -{ - [Test] - public void Test() - { - using var conn = OpenConnection(); - using var transaction = conn.BeginTransaction(); - var manager = new NpgsqlLargeObjectManager(conn); - var oid = manager.Create(); - using (var stream = manager.OpenReadWrite(oid)) - { - var buf = Encoding.UTF8.GetBytes("Hello"); - stream.Write(buf, 0, buf.Length); - stream.Seek(0, System.IO.SeekOrigin.Begin); - var buf2 = new byte[buf.Length]; - stream.Read(buf2, 0, buf2.Length); - Assert.That(buf.SequenceEqual(buf2)); - - Assert.AreEqual(5, stream.Position); - - Assert.AreEqual(5, stream.Length); - - stream.Seek(-1, System.IO.SeekOrigin.Current); - Assert.AreEqual((int)'o', stream.ReadByte()); - - manager.MaxTransferBlockSize = 3; - - stream.Write(buf, 0, buf.Length); - stream.Seek(-5, System.IO.SeekOrigin.End); - var buf3 = new byte[100]; - Assert.AreEqual(5, stream.Read(buf3, 0, 100)); - Assert.That(buf.SequenceEqual(buf3.Take(5))); - - stream.SetLength(43); - Assert.AreEqual(43, stream.Length); - } - - manager.Unlink(oid); - - transaction.Rollback(); - } -} \ No newline at end of file diff --git a/test/Npgsql.Tests/LoggingTests.cs b/test/Npgsql.Tests/LoggingTests.cs new file mode 100644 index 0000000000..0d5d0ee10d --- /dev/null +++ 
b/test/Npgsql.Tests/LoggingTests.cs @@ -0,0 +1,282 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NpgsqlTypes; +using NUnit.Framework; +using static Npgsql.Tests.TestUtil; + +namespace Npgsql.Tests; + +public class LoggingTests : TestBase +{ + [Test] + public async Task Command_ExecuteScalar_single_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement_with_positional_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); + cmd.Parameters.Add(new() { Value = 8 }); + cmd.Parameters.Add(new() { NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") + .And.Contains("SELECT $1, $2") + .And.Contains("Parameters: [8, NULL]")); + 
AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); + AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement__Should_unwrap_array_and_truncate_and_write_nulls() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2, $3, $4, $5, $6", conn); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = 1024 }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = [1, 2, 3], NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = [1, null], NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = null }); + cmd.Parameters.Add(new() { NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") + .And.Contains("SELECT $1, $2, $3, $4, $5, $6") + .And.Contains("Parameters: [1024, [1, 2, 3], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...], [1, NULL], NULL, NULL]")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2, $3, $4, $5, $6"); + AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 1024, "[1, 2, 3]", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...]", "[1, NULL]", "NULL", "NULL" }); + 
AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement_with_named_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") + .And.Contains("SELECT $1, $2") + .And.Contains("Parameters: [8, NULL]")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); + AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); + cmd.Parameters.Add(new() { Value = 8 }); + cmd.Parameters.Add(new() { Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains($"SELECT $1, $2")); + 
AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + } + + [Test] + public async Task Command_ExecuteScalar_multiple_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT 1, []), (SELECT 2, [])]")); + var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT 1")); + Assert.That(batchCommands[0].Parameters, Is.Empty); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT 2")); + Assert.That(batchCommands[1].Parameters, Is.Empty); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_multiple_statement_with_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = 
listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, [8]), (SELECT $1, [9])]")); + var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[0].Parameters.First(), Is.EqualTo(8)); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1].Parameters.First(), Is.EqualTo(9)); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_multiple_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1]")); + var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1], Is.EqualTo("SELECT $1")); + AssertLoggingStateDoesNotContain(executingCommandEvent, 
"Parameters"); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Batch_ExecuteScalar_single_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlBatch(conn) + { + BatchCommands = { new("SELECT 1") } + }; + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Batch_ExecuteScalar_multiple_statements_with_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = + { + new("SELECT $1") { Parameters = { new() { Value = 8 } } }, + new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } + } + }; + + using (listLoggerProvider.Record()) + { + await batch.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, [8]), (SELECT $1, 9, [9])]")); + AssertLoggingStateDoesNotContain(executingCommandEvent, "CommandText"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + AssertLoggingStateContains(executingCommandEvent, 
"ConnectorId", conn.ProcessID); + + var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[0].Parameters.First(), Is.EqualTo(8)); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1, 9")); + Assert.That(batchCommands[1].Parameters.First(), Is.EqualTo(9)); + } + + [Test] + public async Task Batch_ExecuteScalar_single_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = + { + new("SELECT $1") { Parameters = { new() { Value = 8 } } }, + new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } + } + }; + + using (listLoggerProvider.Record()) + { + await batch.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1, 9]")); + var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1], Is.EqualTo("SELECT $1, 9")); + } +} diff --git a/test/Npgsql.Tests/MetricTests.cs b/test/Npgsql.Tests/MetricTests.cs new file mode 100644 index 0000000000..937019f0ee --- /dev/null +++ b/test/Npgsql.Tests/MetricTests.cs @@ -0,0 +1,208 @@ +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; +using OpenTelemetry; 
+using OpenTelemetry.Metrics; + +namespace Npgsql.Tests; + +public class MetricTests : TestBase +{ + [Test] + public async Task OperationDuration() + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 1"; + await using (var reader = await cmd.ExecuteReaderAsync()) + while (await reader.ReadAsync()); + + meterProvider.ForceFlush(); + + var metric = exportedItems.SingleOrDefault(m => m.Name == "db.client.operation.duration"); + Assert.That(metric, Is.Not.Null, "Metric 'db.client.operation.duration' not found."); + + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).Single(); + + Assert.That(point.GetHistogramSum(), Is.GreaterThan(0)); + Assert.That(point.GetHistogramCount(), Is.EqualTo(1)); + + var tags = ToDictionary(point.Tags); + + using (Assert.EnterMultipleScope()) + { + // TODO: Vary this for PG-like databases (e.g. CockroachDB)? 
+ Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + + Assert.That(tags["server.address"], Is.EqualTo(dataSource.Settings.Host)); + Assert.That(tags["server.port"], Is.EqualTo(dataSource.Settings.Port)); + Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(dataSource.Name)); + } + } + + [Test] + public async Task ConnectionCount() + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + await using var dataSource = CreateDataSource(); + + using (var _ = await dataSource.OpenConnectionAsync()) + { + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.count"); + var points = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name); + + var usedPoint = GetPoint(points, "used"); + Assert.That(usedPoint.GetSumLong(), Is.EqualTo(1), "Expected used connections to be 1"); + + var idlePoint = GetPoint(points, "idle"); + Assert.That(idlePoint.GetSumLong(), Is.Zero, "Expected idle connections to be 0"); + + exportedItems.Clear(); + } + + meterProvider.ForceFlush(); + + { + var metric = exportedItems.Single(m => m.Name == "db.client.connection.count"); + var points = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name); + + var usedPoint = GetPoint(points, "used"); + Assert.That(usedPoint.GetSumLong(), Is.Zero, "Expected used connections to be 0"); + + var idlePoint = GetPoint(points, "idle"); + Assert.That(idlePoint.GetSumLong(), Is.EqualTo(1), "Expected idle connections to be 1"); + } + + static MetricPoint GetPoint(IEnumerable points, string state) + { + foreach (var point in points) + { + foreach (var tag in point.Tags) + { + if (tag.Key == "db.client.connection.state" && (string?)tag.Value == state) + return point; + } + } + + Assert.Fail($"Point with state '{state}' not found"); + throw new UnreachableException(); + } + } + + [Test] + public async Task ConnectionMax() 
+ { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.MaxPoolSize = 134; + await using var dataSource = dataSourceBuilder.Build(); + + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.max"); + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).First(p => p.GetSumLong() == 134); + var tags = ToDictionary(point.Tags); + Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(dataSource.Name)); + } + + [Test] + public async Task Pool_name_defaults_to_application_name() + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + var applicationName = "MetricsDataSource" + Interlocked.Increment(ref _dataSourceCounter); + var dataSourceBuilder = base.CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.ApplicationName = applicationName; + // Do not set the data source name - this makes the pool name default to the Application Name + await using var dataSource = dataSourceBuilder.Build(); + + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.max"); + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).First(); + var tags = ToDictionary(point.Tags); + Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(applicationName)); + } + + [Test] + public async Task Password_does_not_leak_via_datasource_name([Values] bool persistSecurityInfo) + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + var dataSourceBuilder = base.CreateDataSourceBuilder(); + 
dataSourceBuilder.ConnectionStringBuilder.PersistSecurityInfo = persistSecurityInfo; + // Do not set the data source name or the application name - this makes the pool name default to the + // connection string, but without the password (even when Persist Security Info is true) + await using var dataSource = dataSourceBuilder.Build(); + + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.max"); + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).First(); + var tags = ToDictionary(point.Tags); + var connectionString = new NpgsqlConnectionStringBuilder((string)tags["db.client.connection.pool.name"]!); + Assert.That(connectionString.Password, Is.Null); + } + + static Dictionary ToDictionary(ReadOnlyTagCollection tags) + { + var dict = new Dictionary(); + foreach (var tag in tags) + dict[tag.Key] = tag.Value; + return dict; + } + + protected override NpgsqlDataSourceBuilder CreateDataSourceBuilder() + { + var dataSourceBuilder = base.CreateDataSourceBuilder(); + dataSourceBuilder.Name = "MetricsDataSource" + Interlocked.Increment(ref _dataSourceCounter); + return dataSourceBuilder; + } + + protected override NpgsqlDataSource CreateDataSource() + => CreateDataSourceBuilder().Build(); + + int _dataSourceCounter; + + static IEnumerable GetFilteredPoints(MetricPointsAccessor points, string dataSourceName) + { + foreach (var point in points) + { + foreach (var tag in point.Tags) + { + if (tag.Key == "db.client.connection.pool.name" && (string?)tag.Value == dataSourceName) + yield return point; + } + } + } +} diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index e588efbee6..398c0520ff 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Tests.Support; using NUnit.Framework; using System; @@ -8,11 +8,9 @@ using System.Linq; using 
System.Net; using System.Net.Sockets; -using System.Runtime.InteropServices; using System.Threading; using System.Threading.Tasks; using System.Transactions; -using Npgsql.Properties; using static Npgsql.Tests.Support.MockState; using static Npgsql.Tests.TestUtil; using IsolationLevel = System.Transactions.IsolationLevel; @@ -20,33 +18,34 @@ namespace Npgsql.Tests; -[NonParallelizable] +#pragma warning disable CS0618 + public class MultipleHostsTests : TestBase { static readonly object[] MyCases = - { - new object[] { "standby", new[] { Primary, Standby }, 1 }, - new object[] { "standby", new[] { PrimaryReadOnly, Standby }, 1 }, - new object[] { "prefer-standby", new[] { Primary, Standby }, 1 }, - new object[] { "prefer-standby", new[] { PrimaryReadOnly, Standby }, 1 }, - new object[] { "prefer-standby", new[] { Primary, Primary }, 0 }, - new object[] { "primary", new[] { Standby, Primary }, 1 }, - new object[] { "primary", new[] { Standby, PrimaryReadOnly }, 1 }, - new object[] { "prefer-primary", new[] { Standby, Primary }, 1 }, - new object[] { "prefer-primary", new[] { Standby, PrimaryReadOnly }, 1 }, - new object[] { "prefer-primary", new[] { Standby, Standby }, 0 }, - new object[] { "any", new[] { Standby, Primary }, 0 }, - new object[] { "any", new[] { Primary, Standby }, 0 }, - new object[] { "any", new[] { PrimaryReadOnly, Standby }, 0 }, - new object[] { "read-write", new[] { Standby, Primary }, 1 }, - new object[] { "read-write", new[] { PrimaryReadOnly, Primary }, 1 }, - new object[] { "read-only", new[] { Primary, Standby }, 1 }, - new object[] { "read-only", new[] { PrimaryReadOnly, Standby }, 0 } - }; + [ + new object[] { TargetSessionAttributes.Standby, new[] { Primary, Standby }, 1 }, + new object[] { TargetSessionAttributes.Standby, new[] { PrimaryReadOnly, Standby }, 1 }, + new object[] { TargetSessionAttributes.PreferStandby, new[] { Primary, Standby }, 1 }, + new object[] { TargetSessionAttributes.PreferStandby, new[] { PrimaryReadOnly, Standby 
}, 1 }, + new object[] { TargetSessionAttributes.PreferStandby, new[] { Primary, Primary }, 0 }, + new object[] { TargetSessionAttributes.Primary, new[] { Standby, Primary }, 1 }, + new object[] { TargetSessionAttributes.Primary, new[] { Standby, PrimaryReadOnly }, 1 }, + new object[] { TargetSessionAttributes.PreferPrimary, new[] { Standby, Primary }, 1 }, + new object[] { TargetSessionAttributes.PreferPrimary, new[] { Standby, PrimaryReadOnly }, 1 }, + new object[] { TargetSessionAttributes.PreferPrimary, new[] { Standby, Standby }, 0 }, + new object[] { TargetSessionAttributes.Any, new[] { Standby, Primary }, 0 }, + new object[] { TargetSessionAttributes.Any, new[] { Primary, Standby }, 0 }, + new object[] { TargetSessionAttributes.Any, new[] { PrimaryReadOnly, Standby }, 0 }, + new object[] { TargetSessionAttributes.ReadWrite, new[] { Standby, Primary }, 1 }, + new object[] { TargetSessionAttributes.ReadWrite, new[] { PrimaryReadOnly, Primary }, 1 }, + new object[] { TargetSessionAttributes.ReadOnly, new[] { Primary, Standby }, 1 }, + new object[] { TargetSessionAttributes.ReadOnly, new[] { PrimaryReadOnly, Standby }, 0 } + ]; [Test] [TestCaseSource(nameof(MyCases))] - public async Task Connect_to_correct_host_pooled(string targetSessionAttributes, MockState[] servers, int expectedServer) + public async Task Connect_to_correct_host_pooled(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -54,13 +53,13 @@ public async Task Connect_to_correct_host_pooled(string targetSessionAttributes, var connectionStringBuilder = new NpgsqlConnectionStringBuilder { Host = MultipleHosts(postmasters), - TargetSessionAttributes = targetSessionAttributes, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, Pooling = true }; - using var pool = CreateTempPool(connectionStringBuilder, out 
var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); + await using var conn = await dataSource.OpenConnectionAsync(targetSessionAttributes); Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); @@ -70,7 +69,7 @@ public async Task Connect_to_correct_host_pooled(string targetSessionAttributes, [Test] [TestCaseSource(nameof(MyCases))] - public async Task Connect_to_correct_host_unpooled(string targetSessionAttributes, MockState[] servers, int expectedServer) + public async Task Connect_to_correct_host_unpooled(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -78,13 +77,62 @@ public async Task Connect_to_correct_host_unpooled(string targetSessionAttribute var connectionStringBuilder = new NpgsqlConnectionStringBuilder { Host = MultipleHosts(postmasters), - TargetSessionAttributes = targetSessionAttributes, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, Pooling = false }; + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); + await using var conn = await dataSource.OpenConnectionAsync(targetSessionAttributes); + + Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); + + for (var i = 0; i <= expectedServer; i++) + _ = await postmasters[i].WaitForServerConnection(); + } + + [Test] + [TestCaseSource(nameof(MyCases))] + public async Task Connect_to_correct_host_legacy(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) + { + var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); + await using var __ = new DisposableWrapper(postmasters); + + var 
connectionStringBuilder = new NpgsqlConnectionStringBuilder + { + Host = MultipleHosts(postmasters), + ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, + TargetSessionAttributes = TargetSessionAttributesAsString(targetSessionAttributes) + }; + using var pool = CreateTempPool(connectionStringBuilder, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = new NpgsqlConnection(connectionString); + await conn.OpenAsync(); + + Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); + + for (var i = 0; i <= expectedServer; i++) + _ = await postmasters[i].WaitForServerConnection(); + } + + [Test] + [TestCaseSource(nameof(MyCases))] + public async Task Connect_to_correct_host_connection_string(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) + { + var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); + await using var __ = new DisposableWrapper(postmasters); + + var connectionStringBuilder = new NpgsqlConnectionStringBuilder + { + Host = MultipleHosts(postmasters), + ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, + TargetSessionAttributes = TargetSessionAttributesAsString(targetSessionAttributes) + }; + + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .Build(); + Assert.That(dataSource, Is.TypeOf()); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); @@ -95,7 +143,7 @@ public async Task Connect_to_correct_host_unpooled(string targetSessionAttribute [Test] [TestCaseSource(nameof(MyCases))] public async Task Connect_to_correct_host_with_available_idle( - string targetSessionAttributes, MockState[] servers, int expectedServer) + TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) { var postmasters = servers.Select(s => 
PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -105,29 +153,26 @@ public async Task Connect_to_correct_host_with_available_idle( var connectionStringBuilder = new NpgsqlConnectionStringBuilder { Host = MultipleHosts(postmasters), - TargetSessionAttributes = servers[0] switch - { - Primary => "read-write", - PrimaryReadOnly => "read-only", - Standby => "standby", - _ => throw new ArgumentOutOfRangeException() - }, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, }; - using var pool = CreateTempPool(connectionStringBuilder, out var connectionString); - await using (_ = await OpenConnectionAsync(connectionString)) + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); + var idleConnTargetSessionAttributes = servers[0] switch + { + Primary => TargetSessionAttributes.ReadWrite, + PrimaryReadOnly => TargetSessionAttributes.ReadOnly, + Standby => TargetSessionAttributes.Standby, + _ => throw new ArgumentOutOfRangeException() + }; + await using (_ = await dataSource.OpenConnectionAsync(idleConnTargetSessionAttributes)) { // Do nothing, close to have an idle connection in the pool. 
} // Now connect with the test TargetSessionAttributes - connectionString = new NpgsqlConnectionStringBuilder(connectionString) - { - TargetSessionAttributes = targetSessionAttributes.ToString() - }.ConnectionString; - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = await dataSource.OpenConnectionAsync(targetSessionAttributes); Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); @@ -136,11 +181,45 @@ public async Task Connect_to_correct_host_with_available_idle( } [Test] - [TestCase("standby", new[] { Primary, Primary })] - [TestCase("primary", new[] { Standby, Standby })] - [TestCase("read-write", new[] { PrimaryReadOnly, Standby })] - [TestCase("read-only", new[] { Primary, Primary })] - public async Task Valid_host_not_found(string targetSessionAttributes, MockState[] servers) + public async Task Legacy_connection_shares_datasource() + { + await using var primaryPostmaster = PgPostmasterMock.Start(state: Primary); + await using var standbyPostmaster = PgPostmasterMock.Start(state: Standby); + + var builder1 = new NpgsqlConnectionStringBuilder + { + Host = MultipleHosts(primaryPostmaster, standbyPostmaster), + ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, + TargetSessionAttributes = "Prefer-Primary" + }; + + // Use the exact same pool for both connections as CreateTempPool adds a unique `ApplicationName` to connection string + using var pool = CreateTempPool(builder1, out var connectionString1); + var connectionString2 = new NpgsqlConnectionStringBuilder(connectionString1) + { + TargetSessionAttributes = "Prefer-Standby" + }.ConnectionString; + + await using var conn1 = new NpgsqlConnection(connectionString1); + await conn1.OpenAsync(); + Assert.That(conn1.Port, Is.EqualTo(primaryPostmaster.Port)); + + await using var conn2 = new NpgsqlConnection(connectionString2); + await conn2.OpenAsync(); + Assert.That(conn2.Port, Is.EqualTo(standbyPostmaster.Port)); + + 
Assert.That(conn1.NpgsqlDataSource, Is.Not.SameAs(conn2.NpgsqlDataSource)); + Assert.That(conn1.NpgsqlDataSource, Is.TypeOf()); + Assert.That(conn2.NpgsqlDataSource, Is.TypeOf()); + Assert.That(((MultiHostDataSourceWrapper)conn1.NpgsqlDataSource).WrappedSource, Is.SameAs(((MultiHostDataSourceWrapper)conn2.NpgsqlDataSource).WrappedSource)); + } + + [Test] + [TestCase(TargetSessionAttributes.Standby, new[] { Primary, Primary })] + [TestCase(TargetSessionAttributes.Primary, new[] { Standby, Standby })] + [TestCase(TargetSessionAttributes.ReadWrite, new[] { PrimaryReadOnly, Standby })] + [TestCase(TargetSessionAttributes.ReadOnly, new[] { Primary, Primary })] + public async Task Valid_host_not_found(TargetSessionAttributes targetSessionAttributes, MockState[] servers) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -149,12 +228,12 @@ public async Task Valid_host_not_found(string targetSessionAttributes, MockState { Host = MultipleHosts(postmasters), ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, - TargetSessionAttributes = targetSessionAttributes }; - using var pool = CreateTempPool(connectionStringBuilder.ConnectionString, out var connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); - var exception = Assert.ThrowsAsync(async () => await OpenConnectionAsync(connectionString))!; + var exception = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync(targetSessionAttributes))!; Assert.That(exception.Message, Is.EqualTo("No suitable host was found.")); Assert.That(exception.InnerException, Is.Null); @@ -165,10 +244,6 @@ public async Task Valid_host_not_found(string targetSessionAttributes, MockState [Test, Platform(Exclude = "MacOsX", Reason = "#3786")] public void All_hosts_are_down() { - // Different exception raised in .NET Core 3.1, skip (NUnit doesn't 
seem to support detecting .NET Core versions) - if (RuntimeInformation.FrameworkDescription.StartsWith(".NET Core 3.1")) - return; - var endpoint = new IPEndPoint(IPAddress.Loopback, 0); using var socket1 = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); @@ -185,8 +260,9 @@ public void All_hosts_are_down() { Host = $"{localEndPoint1.Address}:{localEndPoint1.Port},{localEndPoint2.Address}:{localEndPoint2.Port}" }.ConnectionString; + using var dataSource = new NpgsqlDataSourceBuilder(connectionString).BuildMultiHost(); - var exception = Assert.ThrowsAsync(async () => await OpenConnectionAsync(connectionString))!; + var exception = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync(TargetSessionAttributes.Any))!; var aggregateException = (AggregateException)exception.InnerException!; Assert.That(aggregateException.InnerExceptions, Has.Count.EqualTo(2)); @@ -210,18 +286,16 @@ public async Task All_hosts_are_unavailable( { Host = MultipleHosts(primaryPostmaster, standbyPostmaster), ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, - TargetSessionAttributes = "any", Pooling = pooling, }; - using var _ = CreateTempPool(builder.ConnectionString, out var connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(builder.ConnectionString).BuildMultiHost(); - var ex = Assert.ThrowsAsync(async () => await OpenConnectionAsync(connectionString))!; + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync(TargetSessionAttributes.Any))!; Assert.That(ex.SqlState, Is.EqualTo(errorCode)); } [Test] - [Platform(Exclude = "MacOsX", Reason = "Flaky in CI on Mac")] public async Task First_host_is_down() { using var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); @@ -238,7 +312,9 @@ public async Task First_host_is_down() ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading }.ConnectionString; - await using var conn = await 
OpenConnectionAsync(connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(connectionString).BuildMultiHost(); + + await using var conn = await dataSource.OpenConnectionAsync(TargetSessionAttributes.Any); Assert.That(conn.Port, Is.EqualTo(postmaster.Port)); } @@ -259,14 +335,15 @@ public async Task TargetSessionAttributes_with_single_host(string targetSessionA if (targetSessionAttributes == "any") { - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var postmasterMock = PgPostmasterMock.Start(connectionString); using var pool = CreateTempPool(postmasterMock.ConnectionString, out connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = new NpgsqlConnection(connectionString); + await conn.OpenAsync(); _ = await postmasterMock.WaitForServerConnection(); } else { - Assert.That(() => OpenConnectionAsync(connectionString), Throws.Exception.TypeOf()); + Assert.That(() => new NpgsqlConnection(connectionString), Throws.Exception.TypeOf()); } } @@ -291,9 +368,10 @@ public async Task TargetSessionAttributes_uses_environment_variable() Assert.That(builder.TargetSessionAttributes, Is.Null); - using var _ = CreateTempPool(builder.ConnectionString, out var connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(builder.ConnectionString) + .BuildMultiHost(); - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(conn.Port, Is.EqualTo(standbyPostmaster.Port)); } @@ -326,7 +404,7 @@ public void HostRecheckSeconds_zero_value() [Test] public void HostRecheckSeconds_invalid_throws() - => Assert.Throws(() => + => Assert.Throws(() => new NpgsqlConnectionStringBuilder { HostRecheckSeconds = -1 @@ -346,36 +424,37 @@ public async Task Connect_with_load_balancing() LoadBalanceHosts = true, }; - using var _ = CreateTempPool(defaultCsb.ConnectionString, out var 
defaultConnectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(defaultCsb.ConnectionString) + .BuildMultiHost(); NpgsqlConnector firstConnector; NpgsqlConnector secondConnector; - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync()) { firstConnector = firstConnection.Connector!; } - await using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondConnection = await dataSource.OpenConnectionAsync()) { secondConnector = secondConnection.Connector!; } - Assert.AreNotSame(firstConnector, secondConnector); + Assert.That(secondConnector, Is.Not.SameAs(firstConnector)); - await using (var firstBalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstBalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, firstBalancedConnection.Connector); + Assert.That(firstBalancedConnection.Connector, Is.SameAs(firstConnector)); } - await using (var secondBalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondBalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(secondConnector, secondBalancedConnection.Connector); + Assert.That(secondBalancedConnection.Connector, Is.SameAs(secondConnector)); } - await using (var thirdBalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var thirdBalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, thirdBalancedConnection.Connector); + Assert.That(thirdBalancedConnection.Connector, Is.SameAs(firstConnector)); } } @@ -393,35 +472,36 @@ public async Task Connect_without_load_balancing() LoadBalanceHosts = false, }; - using var _ = CreateTempPool(defaultCsb.ConnectionString, out var defaultConnectionString); + await using var dataSource = new 
NpgsqlDataSourceBuilder(defaultCsb.ConnectionString) + .BuildMultiHost(); NpgsqlConnector firstConnector; NpgsqlConnector secondConnector; - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync()) { firstConnector = firstConnection.Connector!; } - await using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, secondConnection.Connector); + Assert.That(secondConnection.Connector, Is.SameAs(firstConnector)); } - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) - await using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync()) + await using (var secondConnection = await dataSource.OpenConnectionAsync()) { secondConnector = secondConnection.Connector!; } - Assert.AreNotSame(firstConnector, secondConnector); + Assert.That(secondConnector, Is.Not.SameAs(firstConnector)); - await using (var firstUnbalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstUnbalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, firstUnbalancedConnection.Connector); + Assert.That(firstUnbalancedConnection.Connector, Is.SameAs(firstConnector)); } - await using (var secondUnbalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondUnbalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, secondUnbalancedConnection.Connector); + Assert.That(secondUnbalancedConnection.Connector, Is.SameAs(firstConnector)); } } @@ -437,11 +517,11 @@ public async Task Connect_state_changing_hosts([Values] bool alwaysCheckHostStat ServerCompatibilityMode = 
ServerCompatibilityMode.NoTypeLoading, MaxPoolSize = 1, HostRecheckSeconds = alwaysCheckHostState ? 0 : int.MaxValue, - TargetSessionAttributes = "prefer-primary", NoResetOnClose = true, }; - using var _ = CreateTempPool(defaultCsb.ConnectionString, out var defaultConnectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(defaultCsb.ConnectionString) + .BuildMultiHost(); NpgsqlConnector firstConnector; NpgsqlConnector secondConnector; @@ -474,15 +554,15 @@ public async Task Connect_state_changing_hosts([Values] bool alwaysCheckHostStat await server.SendMockState(Primary); }); - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) - await using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary)) + await using (var secondConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary)) { firstConnector = firstConnection.Connector!; secondConnector = secondConnection.Connector!; } - await using var thirdConnection = await OpenConnectionAsync(defaultConnectionString); - Assert.AreSame(alwaysCheckHostState ? secondConnector : firstConnector, thirdConnection.Connector); + await using var thirdConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary); + Assert.That(thirdConnection.Connector, Is.SameAs(alwaysCheckHostState ? 
secondConnector : firstConnector)); await firstServerTask; await secondServerTask; @@ -495,22 +575,22 @@ public void Database_state_cache_basic() var timeStamp = DateTime.UtcNow; dataSource.UpdateDatabaseState(DatabaseState.PrimaryReadWrite, timeStamp, TimeSpan.Zero); - Assert.AreEqual(DatabaseState.PrimaryReadWrite, dataSource.GetDatabaseState()); + Assert.That(dataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadWrite)); // Update with the same timestamp - shouldn't change anything dataSource.UpdateDatabaseState(DatabaseState.Standby, timeStamp, TimeSpan.Zero); - Assert.AreEqual(DatabaseState.PrimaryReadWrite, dataSource.GetDatabaseState()); + Assert.That(dataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadWrite)); // Update with a new timestamp timeStamp = timeStamp.AddSeconds(1); dataSource.UpdateDatabaseState(DatabaseState.PrimaryReadOnly, timeStamp, TimeSpan.Zero); - Assert.AreEqual(DatabaseState.PrimaryReadOnly, dataSource.GetDatabaseState()); + Assert.That(dataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadOnly)); // Expired state returns as Unknown (depending on ignoreExpiration) timeStamp = timeStamp.AddSeconds(1); dataSource.UpdateDatabaseState(DatabaseState.PrimaryReadWrite, timeStamp, TimeSpan.FromSeconds(-1)); - Assert.AreEqual(DatabaseState.Unknown, dataSource.GetDatabaseState(ignoreExpiration: false)); - Assert.AreEqual(DatabaseState.PrimaryReadWrite, dataSource.GetDatabaseState(ignoreExpiration: true)); + Assert.That(dataSource.GetDatabaseState(ignoreExpiration: false), Is.EqualTo(DatabaseState.Unknown)); + Assert.That(dataSource.GetDatabaseState(ignoreExpiration: true), Is.EqualTo(DatabaseState.PrimaryReadWrite)); } [Test] @@ -569,25 +649,23 @@ public async Task Offline_state_on_query_execution_pg_critical_failure() [Test, NonParallelizable] public async Task Offline_state_on_query_execution_pg_non_critical_failure() { - PoolManager.Reset(); - - var csb = new 
NpgsqlConnectionStringBuilder(ConnectionString); - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); // Starting with PG14 we get the cluster's state from PG automatically var expectedState = conn.PostgreSqlVersion.Major > 13 ? DatabaseState.PrimaryReadWrite : DatabaseState.Unknown; - var state = conn.NpgsqlDataSource.GetDatabaseState(); + var state = dataSource.GetDatabaseState(); Assert.That(state, Is.EqualTo(expectedState)); - Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(1)); + Assert.That(dataSource.Statistics.Total, Is.EqualTo(1)); var ex = Assert.ThrowsAsync(() => conn.ExecuteNonQueryAsync("SELECT abc"))!; Assert.That(ex.SqlState, Is.EqualTo(PostgresErrorCodes.UndefinedColumn)); Assert.That(conn.State, Is.EqualTo(ConnectionState.Open)); - state = conn.NpgsqlDataSource.GetDatabaseState(); + state = dataSource.GetDatabaseState(); Assert.That(state, Is.EqualTo(expectedState)); - Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(1)); + Assert.That(dataSource.Statistics.Total, Is.EqualTo(1)); } [Test] @@ -619,10 +697,11 @@ public async Task Offline_state_on_query_execution_IOException() public async Task Offline_state_on_query_execution_TimeoutException() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 1; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = 1; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 1; + builder.ConnectionStringBuilder.CancellationTimeout = 1; + }); await using var conn = await dataSource.OpenConnectionAsync(); await using var anotherConn = await dataSource.OpenConnectionAsync(); @@ -645,10 +724,11 @@ 
public async Task Offline_state_on_query_execution_TimeoutException() public async Task Unknown_state_on_query_execution_TimeoutException_with_disabled_cancellation() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 1; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = -1; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 1; + builder.ConnectionStringBuilder.CancellationTimeout = -1; + }); await using var conn = await dataSource.OpenConnectionAsync(); await using var anotherConn = await dataSource.OpenConnectionAsync(); @@ -671,10 +751,11 @@ public async Task Unknown_state_on_query_execution_TimeoutException_with_disable public async Task Unknown_state_on_query_execution_cancellation_with_disabled_cancellation_timeout() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 30; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = -1; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 30; + builder.ConnectionStringBuilder.CancellationTimeout = -1; + }); await using var conn = await dataSource.OpenConnectionAsync(); await using var anotherConn = await dataSource.OpenConnectionAsync(); @@ -701,10 +782,11 @@ public async Task Unknown_state_on_query_execution_cancellation_with_disabled_ca public async Task Unknown_state_on_query_execution_TimeoutException_with_cancellation_failure() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = 
postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 1; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = 0; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 1; + builder.ConnectionStringBuilder.CancellationTimeout = 0; + }); await using var conn = await dataSource.OpenConnectionAsync(); @@ -791,6 +873,8 @@ public async Task Transaction_enlist_reuses_connection(string targetSessionAttri TargetSessionAttributes = targetSessionAttributes, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, MaxPoolSize = 10, + // Our mock PG server doesn't know how to handle the reset messages + NoResetOnClose = true, }; using var _ = CreateTempPool(csb, out var connString); @@ -891,7 +975,6 @@ await firstServer Assert.That(secondDataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadWrite)); } - // This is the only test in this class which actually connects to PostgreSQL (the others use the PostgreSQL mock) [Test, NonParallelizable] public void IntegrationTest([Values] bool loadBalancing, [Values] bool alwaysCheckHostState) { @@ -925,7 +1008,7 @@ public void IntegrationTest([Values] bool loadBalancing, [Values] bool alwaysChe Assert.DoesNotThrowAsync(() => clientsTask); Assert.ThrowsAsync(() => onlyStandbyClient); Assert.ThrowsAsync(() => readOnlyClient); - Assert.AreEqual(125, queriesDone); + Assert.That(queriesDone, Is.EqualTo(125)); Task Client(NpgsqlMultiHostDataSource multiHostDataSource, TargetSessionAttributes targetSessionAttributes) { @@ -953,6 +1036,26 @@ async Task Query(NpgsqlDataSource dataSource) } } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/5055")] + [NonParallelizable] // Disables sql rewriting + public async Task Multiple_hosts_with_disabled_sql_rewriting() + { + using var _ = DisableSqlRewriting(); + + var dataSourceBuilder = new 
NpgsqlDataSourceBuilder(ConnectionString) + { + ConnectionStringBuilder = + { + Host = "localhost,127.0.0.1", + Pooling = true, + HostRecheckSeconds = 0 + } + }; + await using var dataSource = dataSourceBuilder.BuildMultiHost(); + await using var conn = await dataSource.OpenConnectionAsync(); + } + [Test] public async Task DataSource_with_wrappers() { @@ -1003,15 +1106,6 @@ public async Task DataSource_without_wrappers() Assert.That(standbyConnection.Port, Is.EqualTo(standbyPostmasterMock.Port)); } - [Test] - public void DataSource_with_TargetSessionAttributes_is_not_supported() - { - var builder = new NpgsqlDataSourceBuilder("Host=foo,bar;Target Session Attributes=primary"); - - Assert.That(() => builder.BuildMultiHost(), Throws.Exception.TypeOf() - .With.Message.EqualTo(NpgsqlStrings.CannotSpecifyTargetSessionAttributes)); - } - [Test] public async Task BuildMultiHost_with_single_host_is_supported() { @@ -1040,18 +1134,149 @@ public async Task Build_with_multiple_hosts_is_supported() await using var connection = await dataSource.OpenConnectionAsync(); } - static string MultipleHosts(params PgPostmasterMock[] postmasters) - => string.Join(",", postmasters.Select(p => $"{p.Host}:{p.Port}")); + [Test] + public async Task OpenConnection_when_canceled_throws_TaskCanceledException() + { + var builder = new NpgsqlDataSourceBuilder(ConnectionString); + await using var dataSource = builder.BuildMultiHost(); + using var cts = new CancellationTokenSource(); + cts.Cancel(); + var ex = Assert.ThrowsAsync(async () => + { + await using var connection = await dataSource.OpenConnectionAsync(cts.Token); + }); + Assert.That(ex.CancellationToken, Is.EqualTo(cts.Token)); + } - class DisposableWrapper : IAsyncDisposable + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4181")] + [Explicit("Fails until #4181 is fixed.")] + public async Task LoadBalancing_is_fair_if_first_host_is_down([Values]TargetSessionAttributes targetSessionAttributes) { - readonly IEnumerable 
_disposables; + await using var pDown = PgPostmasterMock.Start(state: Primary, startupErrorCode: PostgresErrorCodes.CannotConnectNow); + await using var pRw1 = PgPostmasterMock.Start(state: Primary); + await using var pR1 = PgPostmasterMock.Start(state: PrimaryReadOnly); + await using var s1 = PgPostmasterMock.Start(state: Standby); + await using var pRw2 = PgPostmasterMock.Start(state: Primary); + await using var pR2 = PgPostmasterMock.Start(state: PrimaryReadOnly); + await using var s2 = PgPostmasterMock.Start(state: Standby); + + var hostList = $"{pDown.Host}:{pDown.Port}," + + $"{pRw1.Host}:{pRw1.Port}," + + $"{pR1.Host}:{pR1.Port}," + + $"{s1.Host}:{s1.Port}," + + $"{pRw2.Host}:{pRw2.Port}," + + $"{pR2.Host}:{pR2.Port}," + + $"{s2.Host}:{s2.Port}"; + + await using var dataSource = CreateDataSource(builder => + { + builder.Host = hostList; + builder.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading; + builder.LoadBalanceHosts = true; + builder.TargetSessionAttributesParsed = targetSessionAttributes; + + }); + var connections = Enumerable.Repeat(0, 12).Select(_ => dataSource.OpenConnection()).ToArray(); + await using var __ = new DisposableWrapper(connections); + + switch (targetSessionAttributes) + { + case TargetSessionAttributes.Any: + Assert.That(connections[0].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[2].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[3].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[5].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[7].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[8].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[11].Port, Is.EqualTo(s2.Port)); + break; + case 
TargetSessionAttributes.ReadWrite: + Assert.That(connections[0].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[2].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[3].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[7].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[11].Port, Is.EqualTo(pRw2.Port)); + break; + case TargetSessionAttributes.ReadOnly: + Assert.That(connections[0].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[2].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[3].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[7].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[11].Port, Is.EqualTo(s2.Port)); + break; + case TargetSessionAttributes.Primary: + case TargetSessionAttributes.PreferPrimary: + Assert.That(connections[0].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[2].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[3].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pRw2.Port)); + 
Assert.That(connections[7].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[11].Port, Is.EqualTo(pR2.Port)); + break; + case TargetSessionAttributes.Standby: + case TargetSessionAttributes.PreferStandby: + Assert.That(connections[0].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[2].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[3].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[6].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[7].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[10].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[11].Port, Is.EqualTo(s2.Port)); + break; + } + } + + static string MultipleHosts(params PgPostmasterMock[] postmasters) + => string.Join(",", postmasters.Select(p => $"{p.Host}:{p.Port}")); - public DisposableWrapper(IEnumerable disposables) => _disposables = disposables; + static string? 
TargetSessionAttributesAsString(TargetSessionAttributes targetSessionAttributes) + => targetSessionAttributes switch + { + TargetSessionAttributes.Any => "Any", + TargetSessionAttributes.Primary => "Primary", + TargetSessionAttributes.Standby => "Standby", + TargetSessionAttributes.PreferPrimary => "Prefer-Primary", + TargetSessionAttributes.PreferStandby => "Prefer-Standby", + TargetSessionAttributes.ReadOnly => "Read-Only", + TargetSessionAttributes.ReadWrite => "Read-Write", + _ => null + }; + sealed class DisposableWrapper(IEnumerable disposables) : IAsyncDisposable + { public async ValueTask DisposeAsync() { - foreach (var disposable in _disposables) + foreach (var disposable in disposables) await disposable.DisposeAsync(); } } diff --git a/test/Npgsql.Tests/NestedDataReaderTests.cs b/test/Npgsql.Tests/NestedDataReaderTests.cs index 72553a6b5e..52531fbdd2 100644 --- a/test/Npgsql.Tests/NestedDataReaderTests.cs +++ b/test/Npgsql.Tests/NestedDataReaderTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; using System; using System.Threading.Tasks; using static Npgsql.Tests.TestUtil; @@ -199,15 +199,15 @@ public void GetBytes() Assert.That(nestedReader.GetBytes(0, 0, null, 0, 4), Is.EqualTo(3)); Assert.That(nestedReader.GetBytes(0, 0, buf, 0, 3), Is.EqualTo(3)); Assert.That(nestedReader.GetBytes(0, 0, buf, 0, 4), Is.EqualTo(3)); - CollectionAssert.AreEqual(new byte[] { 1, 2, 3, 0 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 1, 2, 3, 0 }).AsCollection); buf = new byte[2]; Assert.That(nestedReader.GetBytes(0, 0, buf, 0, 2), Is.EqualTo(2)); - CollectionAssert.AreEqual(new byte[] { 1, 2 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 1, 2 }).AsCollection); buf = new byte[2]; Assert.That(nestedReader.GetBytes(0, 1, buf, 1, 1), Is.EqualTo(1)); - CollectionAssert.AreEqual(new byte[] { 0, 2 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 0, 2 }).AsCollection); Assert.That(nestedReader.GetBytes(0, 2, buf, 1, 1), Is.EqualTo(1)); - 
CollectionAssert.AreEqual(new byte[] { 0, 3 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 0, 3 }).AsCollection); Assert.Throws(() => nestedReader.GetBytes(1, 0, buf, 0, 1)); Assert.Throws(() => nestedReader.GetBytes(0, 4, buf, 0, 1)); } diff --git a/test/Npgsql.Tests/NotificationTests.cs b/test/Npgsql.Tests/NotificationTests.cs index 0092dfdad4..08e7a8f605 100644 --- a/test/Npgsql.Tests/NotificationTests.cs +++ b/test/Npgsql.Tests/NotificationTests.cs @@ -1,9 +1,8 @@ -using NUnit.Framework; +using NUnit.Framework; using System; using System.Data; using System.Threading; using System.Threading.Tasks; -using Npgsql.Internal; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; @@ -20,7 +19,7 @@ public void Notification() conn.ExecuteNonQuery($"LISTEN {notify}"); conn.Notification += (o, e) => receivedNotification = true; conn.ExecuteNonQuery($"NOTIFY {notify}"); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); } [Test, Description("Generates a notification that arrives after reader data that is already being read")] @@ -54,12 +53,12 @@ public async Task Notification_after_data() // Allow some time for the notification to get delivered await Task.Delay(2000); - Assert.IsTrue(reader.Read()); - Assert.AreEqual(1, reader.GetValue(0)); + Assert.That(reader.Read()); + Assert.That(reader.GetValue(0), Is.EqualTo(1)); } Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1024")] @@ -74,7 +73,7 @@ public void Wait() notifyingConn.ExecuteNonQuery($"NOTIFY {notify}"); conn.Notification += (o, e) => receivedNotification = true; Assert.That(conn.Wait(0), Is.EqualTo(true)); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); } @@ -89,9 +88,9 @@ public void Wait_with_timeout() [Test] public void 
Wait_with_prepended_message() { - using var _ = CreateTempPool(ConnectionString, out var connString); - using (OpenConnection(connString)) {} // A DISCARD ALL is now prepended in the connection's write buffer - using var conn = OpenConnection(connString); + using var dataSource = CreateDataSource(); + using (dataSource.OpenConnection()) {} // A DISCARD ALL is now prepended in the connection's write buffer + using var conn = dataSource.OpenConnection(); Assert.That(conn.Wait(100), Is.EqualTo(false)); } @@ -107,7 +106,7 @@ public async Task WaitAsync() await notifyingConn.ExecuteNonQueryAsync($"NOTIFY {notify}"); conn.Notification += (o, e) => receivedNotification = true; await conn.WaitAsync(0); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } @@ -120,23 +119,23 @@ public void WaitAsync_with_timeout() } [Test] - public async Task Wait_with_keepalive() + public void Wait_with_keepalive() { var notify = GetUniqueIdentifier(nameof(NotificationTests)); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - KeepAlive = 1, - Pooling = false - }; - using var conn = OpenConnection(csb); - using var notifyingConn = OpenConnection(); + csb.KeepAlive = 1; + csb.Pooling = false; + }); + using var conn = dataSource.OpenConnection(); + using var notifyingConn = dataSource.OpenConnection(); conn.ExecuteNonQuery($"LISTEN {notify}"); var notificationTask = Task.Delay(2000).ContinueWith(t => notifyingConn.ExecuteNonQuery($"NOTIFY {notify}")); conn.Wait(); Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); // A safeguard against closing an active connection - await notificationTask; + notificationTask.GetAwaiter().GetResult(); //Assert.That(TestLoggerSink.Records, Has.Some.With.Property("EventId").EqualTo(new EventId(NpgsqlEventId.Keepalive))); } @@ -145,18 +144,18 @@ public async Task WaitAsync_with_keepalive() { 
var notify = GetUniqueIdentifier(nameof(NotificationTests)); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - KeepAlive = 1, - Pooling = false - }; - using var conn = OpenConnection(csb); - using var notifyingConn = OpenConnection(); - conn.ExecuteNonQuery($"LISTEN {notify}"); + csb.KeepAlive = 1; + csb.Pooling = false; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var notifyingConn = await dataSource.OpenConnectionAsync(); + await conn.ExecuteNonQueryAsync($"LISTEN {notify}"); var notificationTask = Task.Delay(2000).ContinueWith(t => notifyingConn.ExecuteNonQuery($"NOTIFY {notify}")); await conn.WaitAsync(); //Assert.That(TestLoggerSink.Records, Has.Some.With.Property("EventId").EqualTo(new EventId(NpgsqlEventId.Keepalive))); - Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); // A safeguard against closing an active connection await notificationTask; } @@ -212,4 +211,20 @@ public void WaitAsync_breaks_connection() Assert.That(pgEx.SqlState, Is.EqualTo(PostgresErrorCodes.AdminShutdown)); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4911")] + public async Task Big_notice_while_loading_types() + { + await using var adminConn = await OpenConnectionAsync(); + // Max notification payload is 8000 + await using var dataSource = CreateDataSource(csb => csb.ReadBufferSize = 4096); + await using var conn = await dataSource.OpenConnectionAsync(); + + var notify = GetUniqueIdentifier(nameof(Big_notice_while_loading_types)); + await conn.ExecuteNonQueryAsync($"LISTEN {notify}"); + var payload = new string('a', 5000); + await adminConn.ExecuteNonQueryAsync($"NOTIFY {notify}, '{payload}'"); + + await conn.ReloadTypesAsync(); + } } diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj 
b/test/Npgsql.Tests/Npgsql.Tests.csproj index 7952ad6301..8e04167e9d 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -1,13 +1,29 @@ - + - + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + + + + PreserveNewest + + + + true + $(NoWarn);NPG9001 + $(NoWarn);NPG9002 + $(NoWarn);NPG9003 + diff --git a/test/Npgsql.Tests/NpgsqlEventSourceTests.cs b/test/Npgsql.Tests/NpgsqlEventSourceTests.cs index c1659e6fba..1da8c0745d 100644 --- a/test/Npgsql.Tests/NpgsqlEventSourceTests.cs +++ b/test/Npgsql.Tests/NpgsqlEventSourceTests.cs @@ -44,12 +44,10 @@ public void DisableEventSource() TestEventListener _listener = null!; - readonly List _events = new(); + readonly List _events = []; - class TestEventListener : EventListener + class TestEventListener(List events) : EventListener { - readonly List _events; - public TestEventListener(List events) => _events = events; - protected override void OnEventWritten(EventWrittenEventArgs eventData) => _events.Add(eventData); + protected override void OnEventWritten(EventWrittenEventArgs eventData) => events.Add(eventData); } } diff --git a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs index 94c84b2747..901e34ece9 100644 --- a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs @@ -4,12 +4,15 @@ using System.Data; using System.Data.Common; using System.Diagnostics.CodeAnalysis; +using System.Linq; namespace Npgsql.Tests; -[NonParallelizable] // This test class has global effects on case sensitive matching in param collection. [TestFixture(CompatMode.OnePass)] +#if DEBUG [TestFixture(CompatMode.TwoPass)] +[NonParallelizable] // This test class has global effects on case sensitive matching in param collection. 
+#endif public class NpgsqlParameterCollectionTests { readonly CompatMode _compatMode; @@ -34,13 +37,13 @@ public void Clear() var c1 = new NpgsqlCommand(); var c2 = new NpgsqlCommand(); c1.Parameters.Add(p); - Assert.AreEqual(1, c1.Parameters.Count); - Assert.AreEqual(0, c2.Parameters.Count); + Assert.That(c1.Parameters.Count, Is.EqualTo(1)); + Assert.That(c2.Parameters.Count, Is.EqualTo(0)); c1.Parameters.Clear(); - Assert.AreEqual(0, c1.Parameters.Count); + Assert.That(c1.Parameters.Count, Is.EqualTo(0)); c2.Parameters.Add(p); - Assert.AreEqual(0, c1.Parameters.Count); - Assert.AreEqual(1, c2.Parameters.Count); + Assert.That(c1.Parameters.Count, Is.EqualTo(0)); + Assert.That(c2.Parameters.Count, Is.EqualTo(1)); } [Test] @@ -57,7 +60,7 @@ public void Hash_lookup_parameter_rename_bug() } // Make sure hash lookup is generated. - Assert.AreEqual(command.Parameters["p03"].ParameterName, "p03"); + Assert.That(command.Parameters["p03"].ParameterName, Is.EqualTo("p03")); // Rename the target parameter. command.Parameters["p03"].ParameterName = "a_new_name"; @@ -68,6 +71,34 @@ public void Hash_lookup_parameter_rename_bug() Assert.That(command.Parameters.IndexOf("a_new_name"), Is.GreaterThanOrEqualTo(0)); } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/6067")] + public void Hash_lookup_unnamed_parameter_rename_bug() + { + if (_compatMode == CompatMode.TwoPass) + return; + + using var command = new NpgsqlCommand(); + + for (var i = 0; i < 3; i++) + { + // Put plenty of parameters in the collection to turn on hash lookup functionality. + for (var j = 0; j < LookupThreshold; j++) + { + // Create and add an unnamed parameter before renaming it + var parameter = command.CreateParameter(); + command.Parameters.Add(parameter); + parameter.ParameterName = $"{j}"; + } + + // Make sure hash lookup is generated. 
+ Assert.That(command.Parameters["3"].ParameterName, Is.EqualTo("3")); + + // Remove all parameters to clear hash lookup + command.Parameters.Clear(); + } + } + [Test] public void Remove_duplicate_parameter([Values(LookupThreshold, LookupThreshold - 2)] int count) { @@ -83,7 +114,7 @@ public void Remove_duplicate_parameter([Values(LookupThreshold, LookupThreshold } // Make sure lookup is generated. - Assert.AreEqual(command.Parameters["p02"].ParameterName, "p02"); + Assert.That(command.Parameters["p02"].ParameterName, Is.EqualTo("p02")); // Add uppercased version causing a list to be created. command.Parameters.AddWithValue("P02", NpgsqlDbType.Text, "String parameter value 2"); @@ -92,10 +123,10 @@ public void Remove_duplicate_parameter([Values(LookupThreshold, LookupThreshold command.Parameters.Remove(command.Parameters["p02"]); // Test whether we can still find the last added parameter, and if its index is correctly shifted in the lookup. - Assert.IsTrue(command.Parameters.IndexOf("p02") == count - 1); - Assert.IsTrue(command.Parameters.IndexOf("P02") == count - 1); + Assert.That(command.Parameters.IndexOf("p02") == count - 1); + Assert.That(command.Parameters.IndexOf("P02") == count - 1); // And finally test whether other parameters were also correctly shifted. - Assert.IsTrue(command.Parameters.IndexOf("p03") == 1); + Assert.That(command.Parameters.IndexOf("p03") == 1); } [Test] @@ -113,8 +144,8 @@ public void Remove_parameter([Values(LookupThreshold, LookupThreshold - 2)] int command.Parameters.Remove(command.Parameters["p02"]); // Make sure we cannot find it, also not case insensitively. - Assert.IsTrue(command.Parameters.IndexOf("p02") == -1); - Assert.IsTrue(command.Parameters.IndexOf("P02") == -1); + Assert.That(command.Parameters.IndexOf("p02") == -1); + Assert.That(command.Parameters.IndexOf("P02") == -1); } [Test] @@ -153,7 +184,7 @@ public void Correct_index_returned_for_duplicate_ParameterName([Values(LookupThr } // Make sure lookup is generated. 
- Assert.AreEqual(command.Parameters["parameter02"].ParameterName, "parameter02"); + Assert.That(command.Parameters["parameter02"].ParameterName, Is.EqualTo("parameter02")); // Add uppercased version. command.Parameters.AddWithValue("Parameter02", NpgsqlDbType.Text, "String parameter value 2"); @@ -162,14 +193,14 @@ public void Correct_index_returned_for_duplicate_ParameterName([Values(LookupThr command.Parameters.Insert(0, new NpgsqlParameter("ParameteR02", NpgsqlDbType.Text) { Value = "String parameter value 2" }); // Try to find the exact index. - Assert.IsTrue(command.Parameters.IndexOf("parameter02") == 2); - Assert.IsTrue(command.Parameters.IndexOf("Parameter02") == command.Parameters.Count - 1); - Assert.IsTrue(command.Parameters.IndexOf("ParameteR02") == 0); + Assert.That(command.Parameters.IndexOf("parameter02") == 2); + Assert.That(command.Parameters.IndexOf("Parameter02") == command.Parameters.Count - 1); + Assert.That(command.Parameters.IndexOf("ParameteR02") == 0); // This name does not exist so we expect the first case insensitive match to be returned. - Assert.IsTrue(command.Parameters.IndexOf("ParaMeteR02") == 0); + Assert.That(command.Parameters.IndexOf("ParaMeteR02") == 0); // And finally test whether other parameters were also correctly shifted. 
- Assert.IsTrue(command.Parameters.IndexOf("parameter03") == 3); + Assert.That(command.Parameters.IndexOf("parameter03") == 3); } [Test] @@ -225,6 +256,15 @@ public void Positional_parameter_lookup_returns_first_match([Values(LookupThresh Assert.That(command.Parameters.IndexOf(""), Is.EqualTo(0)); } + [Test] + public void Throw_multiple_positions_same_instance() + { + using var cmd = new NpgsqlCommand("SELECT $1, $2"); + var p = new NpgsqlParameter("", "Hello world"); + cmd.Parameters.Add(p); + Assert.Throws(() => cmd.Parameters.Add(p)); + } + [Test] public void IndexOf_falls_back_to_first_insensitive_match([Values] bool manyParams) { @@ -305,10 +345,22 @@ public void Clean_name() param.ParameterName = null; // These should not throw exceptions - Assert.AreEqual(0, command.Parameters.IndexOf(param.ParameterName)); - Assert.AreEqual(NpgsqlParameter.PositionalName, param.ParameterName); + Assert.That(command.Parameters.IndexOf(param.ParameterName), Is.EqualTo(0)); + Assert.That(param.ParameterName, Is.EqualTo(NpgsqlParameter.PositionalName)); } + [Test] + public void Clone_sets_correct_collection() + { + var cmd = new NpgsqlCommand(); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = 42 }); + Assert.That(cmd.Parameters.Single().Collection, Is.SameAs(cmd.Parameters)); + + cmd = cmd.Clone(); + Assert.That(cmd.Parameters.Single().Collection, Is.SameAs(cmd.Parameters)); + } + + public NpgsqlParameterCollectionTests(CompatMode compatMode) { _compatMode = compatMode; diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index fe9f5f96b5..23c59c7a95 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -1,9 +1,9 @@ using NpgsqlTypes; using NUnit.Framework; using System; -using System.Collections.Generic; using System.Data; using System.Data.Common; +using System.Threading.Tasks; namespace Npgsql.Tests; @@ -109,8 +109,8 @@ public void 
Cannot_infer_data_type_name_from_NpgsqlDbType_for_unknown_range() [Test] public void Infer_data_type_name_from_ClrType() { - var p = new NpgsqlParameter("p1", new Dictionary()); - Assert.That(p.DataTypeName, Is.EqualTo("hstore")); + var p = new NpgsqlParameter("p1", Array.Empty()); + Assert.That(p.DataTypeName, Is.EqualTo("bytea")); } [Test] @@ -132,7 +132,7 @@ public void Setting_NpgsqlDbType_sets_DbType() [Test] public void Setting_value_does_not_change_DbType() { - var p = new NpgsqlParameter { DbType = DbType.String, NpgsqlDbType = NpgsqlDbType.Bytea }; + var p = new NpgsqlParameter { DbType = DbType.Binary, NpgsqlDbType = NpgsqlDbType.Bytea }; p.Value = 8; Assert.That(p.DbType, Is.EqualTo(DbType.Binary)); Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea)); @@ -146,17 +146,17 @@ public void Setting_value_does_not_change_DbType() public void Constructor1() { var p = new NpgsqlParameter(); - Assert.AreEqual(DbType.Object, p.DbType, "DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "Direction"); - Assert.IsFalse(p.IsNullable, "IsNullable"); - Assert.AreEqual(string.Empty, p.ParameterName, "ParameterName"); - Assert.AreEqual(0, p.Precision, "Precision"); - Assert.AreEqual(0, p.Scale, "Scale"); - Assert.AreEqual(0, p.Size, "Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "NpgsqlDbType"); - Assert.IsNull(p.Value, "Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "DbType"); + Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "Direction"); + Assert.That(p.IsNullable, Is.False, "IsNullable"); + Assert.That(p.ParameterName, Is.Empty, "ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "Scale"); + Assert.That(p.Size, Is.EqualTo(0), "Size"); + Assert.That(p.SourceColumn, Is.Empty, "SourceColumn"); + 
Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "NpgsqlDbType"); + Assert.That(p.Value, Is.Null, "Value"); } [Test] @@ -165,51 +165,51 @@ public void Constructor2_Value_DateTime() var value = new DateTime(2004, 8, 24); var p = new NpgsqlParameter("address", value); - Assert.AreEqual(DbType.DateTime2, p.DbType, "B:DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "B:Direction"); - Assert.IsFalse(p.IsNullable, "B:IsNullable"); - Assert.AreEqual("address", p.ParameterName, "B:ParameterName"); - Assert.AreEqual(0, p.Precision, "B:Precision"); - Assert.AreEqual(0, p.Scale, "B:Scale"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "B:DbType"); + Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "B:Direction"); + Assert.That(p.IsNullable, Is.False, "B:IsNullable"); + Assert.That(p.ParameterName, Is.EqualTo("address"), "B:ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "B:Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "B:Scale"); //Assert.AreEqual (0, p.Size, "B:Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "B:SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "B:SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "B:NpgsqlDbType"); - Assert.AreEqual(value, p.Value, "B:Value"); + Assert.That(p.SourceColumn, Is.Empty, "B:SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "B:SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "B:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(value), "B:Value"); } [Test] public void Constructor2_Value_DBNull() { var p = new NpgsqlParameter("address", DBNull.Value); - Assert.AreEqual(DbType.Object, p.DbType, "B:DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "B:Direction"); - Assert.IsFalse(p.IsNullable, "B:IsNullable"); - Assert.AreEqual("address", 
p.ParameterName, "B:ParameterName"); - Assert.AreEqual(0, p.Precision, "B:Precision"); - Assert.AreEqual(0, p.Scale, "B:Scale"); - Assert.AreEqual(0, p.Size, "B:Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "B:SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "B:SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "B:NpgsqlDbType"); - Assert.AreEqual(DBNull.Value, p.Value, "B:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "B:DbType"); + Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "B:Direction"); + Assert.That(p.IsNullable, Is.False, "B:IsNullable"); + Assert.That(p.ParameterName, Is.EqualTo("address"), "B:ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "B:Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "B:Scale"); + Assert.That(p.Size, Is.EqualTo(0), "B:Size"); + Assert.That(p.SourceColumn, Is.Empty, "B:SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "B:SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "B:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(DBNull.Value), "B:Value"); } [Test] public void Constructor2_Value_null() { var p = new NpgsqlParameter("address", null); - Assert.AreEqual(DbType.Object, p.DbType, "A:DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "A:Direction"); - Assert.IsFalse(p.IsNullable, "A:IsNullable"); - Assert.AreEqual("address", p.ParameterName, "A:ParameterName"); - Assert.AreEqual(0, p.Precision, "A:Precision"); - Assert.AreEqual(0, p.Scale, "A:Scale"); - Assert.AreEqual(0, p.Size, "A:Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "A:SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "A:SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "A:NpgsqlDbType"); - Assert.IsNull(p.Value, "A:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "A:DbType"); + Assert.That(p.Direction, 
Is.EqualTo(ParameterDirection.Input), "A:Direction"); + Assert.That(p.IsNullable, Is.False, "A:IsNullable"); + Assert.That(p.ParameterName, Is.EqualTo("address"), "A:ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "A:Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "A:Scale"); + Assert.That(p.Size, Is.EqualTo(0), "A:Size"); + Assert.That(p.SourceColumn, Is.Empty, "A:SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "A:SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "A:NpgsqlDbType"); + Assert.That(p.Value, Is.Null, "A:Value"); } [Test] @@ -219,20 +219,20 @@ public void Constructor7() var p1 = new NpgsqlParameter("p1Name", NpgsqlDbType.Varchar, 20, "srcCol", ParameterDirection.InputOutput, false, 0, 0, DataRowVersion.Original, "foo"); - Assert.AreEqual(DbType.String, p1.DbType, "DbType"); - Assert.AreEqual(ParameterDirection.InputOutput, p1.Direction, "Direction"); - Assert.AreEqual(false, p1.IsNullable, "IsNullable"); + Assert.That(p1.DbType, Is.EqualTo(DbType.String), "DbType"); + Assert.That(p1.Direction, Is.EqualTo(ParameterDirection.InputOutput), "Direction"); + Assert.That(p1.IsNullable, Is.EqualTo(false), "IsNullable"); //Assert.AreEqual (999, p1.LocaleId, "#"); - Assert.AreEqual("p1Name", p1.ParameterName, "ParameterName"); - Assert.AreEqual(0, p1.Precision, "Precision"); - Assert.AreEqual(0, p1.Scale, "Scale"); - Assert.AreEqual(20, p1.Size, "Size"); - Assert.AreEqual("srcCol", p1.SourceColumn, "SourceColumn"); - Assert.AreEqual(false, p1.SourceColumnNullMapping, "SourceColumnNullMapping"); - Assert.AreEqual(DataRowVersion.Original, p1.SourceVersion, "SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Varchar, p1.NpgsqlDbType, "NpgsqlDbType"); + Assert.That(p1.ParameterName, Is.EqualTo("p1Name"), "ParameterName"); + Assert.That(p1.Precision, Is.EqualTo(0), "Precision"); + Assert.That(p1.Scale, Is.EqualTo(0), "Scale"); + Assert.That(p1.Size, Is.EqualTo(20), "Size"); + 
Assert.That(p1.SourceColumn, Is.EqualTo("srcCol"), "SourceColumn"); + Assert.That(p1.SourceColumnNullMapping, Is.EqualTo(false), "SourceColumnNullMapping"); + Assert.That(p1.SourceVersion, Is.EqualTo(DataRowVersion.Original), "SourceVersion"); + Assert.That(p1.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Varchar), "NpgsqlDbType"); //Assert.AreEqual (3210, p1.NpgsqlValue, "#"); - Assert.AreEqual("foo", p1.Value, "Value"); + Assert.That(p1.Value, Is.EqualTo("foo"), "Value"); //Assert.AreEqual ("database", p1.XmlSchemaCollectionDatabase, "XmlSchemaCollectionDatabase"); //Assert.AreEqual ("name", p1.XmlSchemaCollectionName, "XmlSchemaCollectionName"); //Assert.AreEqual ("schema", p1.XmlSchemaCollectionOwningSchema, "XmlSchemaCollectionOwningSchema"); @@ -262,22 +262,22 @@ public void Clone() }; var actual = expected.Clone(); - Assert.AreEqual(expected.Value, actual.Value); - Assert.AreEqual(expected.ParameterName, actual.ParameterName); + Assert.That(actual.Value, Is.EqualTo(expected.Value)); + Assert.That(actual.ParameterName, Is.EqualTo(expected.ParameterName)); - Assert.AreEqual(expected.DbType, actual.DbType); - Assert.AreEqual(expected.NpgsqlDbType, actual.NpgsqlDbType); - Assert.AreEqual(expected.DataTypeName, actual.DataTypeName); + Assert.That(actual.DbType, Is.EqualTo(expected.DbType)); + Assert.That(actual.NpgsqlDbType, Is.EqualTo(expected.NpgsqlDbType)); + Assert.That(actual.DataTypeName, Is.EqualTo(expected.DataTypeName)); - Assert.AreEqual(expected.Direction, actual.Direction); - Assert.AreEqual(expected.IsNullable, actual.IsNullable); - Assert.AreEqual(expected.Precision, actual.Precision); - Assert.AreEqual(expected.Scale, actual.Scale); - Assert.AreEqual(expected.Size, actual.Size); + Assert.That(actual.Direction, Is.EqualTo(expected.Direction)); + Assert.That(actual.IsNullable, Is.EqualTo(expected.IsNullable)); + Assert.That(actual.Precision, Is.EqualTo(expected.Precision)); + Assert.That(actual.Scale, Is.EqualTo(expected.Scale)); + Assert.That(actual.Size, 
Is.EqualTo(expected.Size)); - Assert.AreEqual(expected.SourceVersion, actual.SourceVersion); - Assert.AreEqual(expected.SourceColumn, actual.SourceColumn); - Assert.AreEqual(expected.SourceColumnNullMapping, actual.SourceColumnNullMapping); + Assert.That(actual.SourceVersion, Is.EqualTo(expected.SourceVersion)); + Assert.That(actual.SourceColumn, Is.EqualTo(expected.SourceColumn)); + Assert.That(actual.SourceColumnNullMapping, Is.EqualTo(expected.SourceColumnNullMapping)); } [Test] @@ -304,162 +304,131 @@ public void Clone_generic() }; var actual = (NpgsqlParameter)expected.Clone(); - Assert.AreEqual(expected.Value, actual.Value); - Assert.AreEqual(expected.TypedValue, actual.TypedValue); - Assert.AreEqual(expected.ParameterName, actual.ParameterName); + Assert.That(actual.Value, Is.EqualTo(expected.Value)); + Assert.That(actual.TypedValue, Is.EqualTo(expected.TypedValue)); + Assert.That(actual.ParameterName, Is.EqualTo(expected.ParameterName)); - Assert.AreEqual(expected.DbType, actual.DbType); - Assert.AreEqual(expected.NpgsqlDbType, actual.NpgsqlDbType); - Assert.AreEqual(expected.DataTypeName, actual.DataTypeName); + Assert.That(actual.DbType, Is.EqualTo(expected.DbType)); + Assert.That(actual.NpgsqlDbType, Is.EqualTo(expected.NpgsqlDbType)); + Assert.That(actual.DataTypeName, Is.EqualTo(expected.DataTypeName)); - Assert.AreEqual(expected.Direction, actual.Direction); - Assert.AreEqual(expected.IsNullable, actual.IsNullable); - Assert.AreEqual(expected.Precision, actual.Precision); - Assert.AreEqual(expected.Scale, actual.Scale); - Assert.AreEqual(expected.Size, actual.Size); + Assert.That(actual.Direction, Is.EqualTo(expected.Direction)); + Assert.That(actual.IsNullable, Is.EqualTo(expected.IsNullable)); + Assert.That(actual.Precision, Is.EqualTo(expected.Precision)); + Assert.That(actual.Scale, Is.EqualTo(expected.Scale)); + Assert.That(actual.Size, Is.EqualTo(expected.Size)); - Assert.AreEqual(expected.SourceVersion, actual.SourceVersion); - 
Assert.AreEqual(expected.SourceColumn, actual.SourceColumn); - Assert.AreEqual(expected.SourceColumnNullMapping, actual.SourceColumnNullMapping); + Assert.That(actual.SourceVersion, Is.EqualTo(expected.SourceVersion)); + Assert.That(actual.SourceColumn, Is.EqualTo(expected.SourceColumn)); + Assert.That(actual.SourceColumnNullMapping, Is.EqualTo(expected.SourceColumnNullMapping)); } #endregion - [Test] - [Ignore("")] - public void InferType_invalid_throws() - { - var notsupported = new object[] - { - ushort.MaxValue, - uint.MaxValue, - ulong.MaxValue, - sbyte.MaxValue, - new NpgsqlParameter() - }; - - var param = new NpgsqlParameter(); - - for (var i = 0; i < notsupported.Length; i++) - { - try - { - param.Value = notsupported[i]; - Assert.Fail("#A1:" + i); - } - catch (FormatException) - { - // appears to be bug in .NET 1.1 while - // constructing exception message - } - catch (ArgumentException ex) - { - // The parameter data type of ... is invalid - Assert.AreEqual(typeof(ArgumentException), ex.GetType(), "#A2"); - Assert.IsNull(ex.InnerException, "#A3"); - Assert.IsNotNull(ex.Message, "#A4"); - Assert.IsNull(ex.ParamName, "#A5"); - } - } - } - [Test] // bug #320196 public void Parameter_null() { var param = new NpgsqlParameter("param", NpgsqlDbType.Numeric); - Assert.AreEqual(0, param.Scale, "#A1"); + Assert.That(param.Scale, Is.EqualTo(0), "#A1"); param.Value = DBNull.Value; - Assert.AreEqual(0, param.Scale, "#A2"); + Assert.That(param.Scale, Is.EqualTo(0), "#A2"); param = new NpgsqlParameter("param", NpgsqlDbType.Integer); - Assert.AreEqual(0, param.Scale, "#B1"); + Assert.That(param.Scale, Is.EqualTo(0), "#B1"); param.Value = DBNull.Value; - Assert.AreEqual(0, param.Scale, "#B2"); + Assert.That(param.Scale, Is.EqualTo(0), "#B2"); } [Test] - [Ignore("")] public void Parameter_type() { NpgsqlParameter p; // If Type is not set, then type is inferred from the value // assigned. 
The Type should be inferred everytime Value is assigned - // If value is null or DBNull, then the current Type should be reset to Text. - p = new NpgsqlParameter(); - Assert.AreEqual(DbType.String, p.DbType, "#A1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#A2"); + // If value is null or DBNull, then the current Type should be reset to Unknown (DbType.Object and NpgsqlDbType.Unknown). + p = new NpgsqlParameter { Value = "" }; + Assert.That(p.DbType, Is.EqualTo(DbType.String), "#A1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#A2"); p.Value = DBNull.Value; - Assert.AreEqual(DbType.String, p.DbType, "#B1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#B2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#B1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#B2"); p.Value = 1; - Assert.AreEqual(DbType.Int32, p.DbType, "#C1"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#C2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#C1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#C2"); p.Value = DBNull.Value; - Assert.AreEqual(DbType.String, p.DbType, "#D1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#D2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#D1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#D2"); p.Value = new byte[] { 0x0a }; - Assert.AreEqual(DbType.Binary, p.DbType, "#E1"); - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#E2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Binary), "#E1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#E2"); p.Value = null; - Assert.AreEqual(DbType.String, p.DbType, "#F1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#F2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#F1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#F2"); p.Value = DateTime.Now; - Assert.AreEqual(DbType.DateTime, p.DbType, "#G1"); - 
Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#G2"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "#G1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#G2"); p.Value = null; - Assert.AreEqual(DbType.String, p.DbType, "#H1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#H2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#H1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#H2"); // If DbType is set, then the NpgsqlDbType should not be // inferred from the value assigned. p = new NpgsqlParameter(); p.DbType = DbType.DateTime; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I1"); p.Value = 1; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I2"); p.Value = null; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I3"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I3"); p.Value = DBNull.Value; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I4"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I4"); // If NpgsqlDbType is set, then the DbType should not be // inferred from the value assigned. 
p = new NpgsqlParameter(); p.NpgsqlDbType = NpgsqlDbType.Bytea; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J1"); p.Value = 1; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J2"); p.Value = null; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J3"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J3"); p.Value = DBNull.Value; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J4"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J4"); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5428")] + public async Task Match_param_index_case_insensitively() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p,@P", conn); + cmd.Parameters.AddWithValue("p", "Hello world"); + await cmd.ExecuteNonQueryAsync(); } [Test] - [Ignore("")] public void ParameterName() { var p = new NpgsqlParameter(); p.ParameterName = "name"; - Assert.AreEqual("name", p.ParameterName, "#A:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#A:SourceColumn"); + Assert.That(p.ParameterName, Is.EqualTo("name"), "#A:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#A:SourceColumn"); p.ParameterName = null; - Assert.AreEqual(string.Empty, p.ParameterName, "#B:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#B:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#B:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#B:SourceColumn"); p.ParameterName = " "; - Assert.AreEqual(" ", p.ParameterName, "#C:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#C:SourceColumn"); + Assert.That(p.ParameterName, Is.EqualTo(" "), "#C:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#C:SourceColumn"); p.ParameterName = " name "; - Assert.AreEqual(" name ", 
p.ParameterName, "#D:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#D:SourceColumn"); + Assert.That(p.ParameterName, Is.EqualTo(" name "), "#D:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#D:SourceColumn"); p.ParameterName = string.Empty; - Assert.AreEqual(string.Empty, p.ParameterName, "#E:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#E:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#E:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#E:SourceColumn"); } [Test] @@ -470,59 +439,59 @@ public void ResetDbType() //Parameter with an assigned value but no DbType specified p = new NpgsqlParameter("foo", 42); p.ResetDbType(); - Assert.AreEqual(DbType.Int32, p.DbType, "#A:DbType"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#A:NpgsqlDbType"); - Assert.AreEqual(42, p.Value, "#A:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#A:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#A:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(42), "#A:Value"); p.DbType = DbType.DateTime; //assigning a DbType - Assert.AreEqual(DbType.DateTime, p.DbType, "#B:DbType1"); - Assert.AreEqual(NpgsqlDbType.TimestampTz, p.NpgsqlDbType, "#B:SqlDbType1"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime), "#B:DbType1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#B:SqlDbType1"); p.ResetDbType(); - Assert.AreEqual(DbType.Int32, p.DbType, "#B:DbType2"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#B:SqlDbtype2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#B:DbType2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#B:SqlDbtype2"); //Parameter with an assigned NpgsqlDbType but no specified value p = new NpgsqlParameter("foo", NpgsqlDbType.Integer); p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#C:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#C:NpgsqlDbType"); + 
Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#C:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#C:NpgsqlDbType"); p.NpgsqlDbType = NpgsqlDbType.TimestampTz; //assigning a NpgsqlDbType - Assert.AreEqual(DbType.DateTime, p.DbType, "#D:DbType1"); - Assert.AreEqual(NpgsqlDbType.TimestampTz, p.NpgsqlDbType, "#D:SqlDbType1"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime), "#D:DbType1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#D:SqlDbType1"); p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#D:DbType2"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#D:SqlDbType2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#D:DbType2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#D:SqlDbType2"); p = new NpgsqlParameter(); p.Value = DateTime.MaxValue; - Assert.AreEqual(DbType.DateTime2, p.DbType, "#E:DbType1"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#E:SqlDbType1"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "#E:DbType1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#E:SqlDbType1"); p.Value = null; p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#E:DbType2"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#E:SqlDbType2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#E:DbType2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#E:SqlDbType2"); p = new NpgsqlParameter("foo", NpgsqlDbType.Varchar); p.Value = DateTime.MaxValue; p.ResetDbType(); - Assert.AreEqual(DbType.DateTime2, p.DbType, "#F:DbType"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#F:NpgsqlDbType"); - Assert.AreEqual(DateTime.MaxValue, p.Value, "#F:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "#F:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#F:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(DateTime.MaxValue), "#F:Value"); p = new 
NpgsqlParameter("foo", NpgsqlDbType.Varchar); p.Value = DBNull.Value; p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#G:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#G:NpgsqlDbType"); - Assert.AreEqual(DBNull.Value, p.Value, "#G:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#G:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#G:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(DBNull.Value), "#G:Value"); p = new NpgsqlParameter("foo", NpgsqlDbType.Varchar); p.Value = null; p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#G:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#G:NpgsqlDbType"); - Assert.IsNull(p.Value, "#G:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#G:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#G:NpgsqlDbType"); + Assert.That(p.Value, Is.Null, "#G:Value"); } [Test] @@ -530,29 +499,28 @@ public void ParameterName_retains_prefix() => Assert.That(new NpgsqlParameter("@p", DbType.String).ParameterName, Is.EqualTo("@p")); [Test] - [Ignore("")] public void SourceColumn() { var p = new NpgsqlParameter(); p.SourceColumn = "name"; - Assert.AreEqual(string.Empty, p.ParameterName, "#A:ParameterName"); - Assert.AreEqual("name", p.SourceColumn, "#A:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#A:ParameterName"); + Assert.That(p.SourceColumn, Is.EqualTo("name"), "#A:SourceColumn"); p.SourceColumn = null; - Assert.AreEqual(string.Empty, p.ParameterName, "#B:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#B:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#B:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#B:SourceColumn"); p.SourceColumn = " "; - Assert.AreEqual(string.Empty, p.ParameterName, "#C:ParameterName"); - Assert.AreEqual(" ", p.SourceColumn, "#C:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#C:ParameterName"); + Assert.That(p.SourceColumn, 
Is.EqualTo(" "), "#C:SourceColumn"); p.SourceColumn = " name "; - Assert.AreEqual(string.Empty, p.ParameterName, "#D:ParameterName"); - Assert.AreEqual(" name ", p.SourceColumn, "#D:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#D:ParameterName"); + Assert.That(p.SourceColumn, Is.EqualTo(" name "), "#D:SourceColumn"); p.SourceColumn = string.Empty; - Assert.AreEqual(string.Empty, p.ParameterName, "#E:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#E:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#E:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#E:SourceColumn"); } [Test] @@ -560,8 +528,8 @@ public void Bug1011100_NpgsqlDbType() { var p = new NpgsqlParameter(); p.Value = DBNull.Value; - Assert.AreEqual(DbType.Object, p.DbType, "#A:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#A:NpgsqlDbType"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#A:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#A:NpgsqlDbType"); // Now change parameter value. 
// Note that as we didn't explicitly specified a dbtype, the dbtype property should change when @@ -569,8 +537,8 @@ public void Bug1011100_NpgsqlDbType() p.Value = 8; - Assert.AreEqual(DbType.Int32, p.DbType, "#A:DbType"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#A:NpgsqlDbType"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#A:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#A:NpgsqlDbType"); //Assert.AreEqual(3510, p.Value, "#A:Value"); //p.NpgsqlDbType = NpgsqlDbType.Varchar; @@ -598,19 +566,19 @@ public void NpgsqlParameter_Clone() var newParam = param.Clone(); - Assert.AreEqual(param.Value, newParam.Value); - Assert.AreEqual(param.Precision, newParam.Precision); - Assert.AreEqual(param.Scale, newParam.Scale); - Assert.AreEqual(param.Size, newParam.Size); - Assert.AreEqual(param.Direction, newParam.Direction); - Assert.AreEqual(param.IsNullable, newParam.IsNullable); - Assert.AreEqual(param.ParameterName, newParam.ParameterName); - Assert.AreEqual(param.TrimmedName, newParam.TrimmedName); - Assert.AreEqual(param.SourceColumn, newParam.SourceColumn); - Assert.AreEqual(param.SourceVersion, newParam.SourceVersion); - Assert.AreEqual(param.NpgsqlValue, newParam.NpgsqlValue); - Assert.AreEqual(param.SourceColumnNullMapping, newParam.SourceColumnNullMapping); - Assert.AreEqual(param.NpgsqlValue, newParam.NpgsqlValue); + Assert.That(newParam.Value, Is.EqualTo(param.Value)); + Assert.That(newParam.Precision, Is.EqualTo(param.Precision)); + Assert.That(newParam.Scale, Is.EqualTo(param.Scale)); + Assert.That(newParam.Size, Is.EqualTo(param.Size)); + Assert.That(newParam.Direction, Is.EqualTo(param.Direction)); + Assert.That(newParam.IsNullable, Is.EqualTo(param.IsNullable)); + Assert.That(newParam.ParameterName, Is.EqualTo(param.ParameterName)); + Assert.That(newParam.TrimmedName, Is.EqualTo(param.TrimmedName)); + Assert.That(newParam.SourceColumn, Is.EqualTo(param.SourceColumn)); + Assert.That(newParam.SourceVersion, 
Is.EqualTo(param.SourceVersion)); + Assert.That(newParam.NpgsqlValue, Is.EqualTo(param.NpgsqlValue)); + Assert.That(newParam.SourceColumnNullMapping, Is.EqualTo(param.SourceColumnNullMapping)); + Assert.That(newParam.NpgsqlValue, Is.EqualTo(param.NpgsqlValue)); } @@ -622,7 +590,7 @@ public void Precision_via_interface() paramIface.Precision = 42; - Assert.AreEqual((byte)42, paramIface.Precision); + Assert.That(paramIface.Precision, Is.EqualTo((byte)42)); } [Test] @@ -633,7 +601,7 @@ public void Precision_via_base_class() paramBase.Precision = 42; - Assert.AreEqual((byte)42, paramBase.Precision); + Assert.That(paramBase.Precision, Is.EqualTo((byte)42)); } [Test] @@ -644,7 +612,7 @@ public void Scale_via_interface() paramIface.Scale = 42; - Assert.AreEqual((byte)42, paramIface.Scale); + Assert.That(paramIface.Scale, Is.EqualTo((byte)42)); } [Test] @@ -655,7 +623,7 @@ public void Scale_via_base_class() paramBase.Scale = 42; - Assert.AreEqual((byte)42, paramBase.Scale); + Assert.That(paramBase.Scale, Is.EqualTo((byte)42)); } [Test] @@ -684,6 +652,81 @@ public void Null_value_with_nullable_type() Assert.That(reader.GetFieldValue(0), Is.Null); } + [Test] + public void DBNull_reuses_type_info([Values]bool generic) + { + var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var typeInfo, out _); + Assert.That(typeInfo, Is.Not.Null); + + // Make sure we don't reset the type info when setting DBNull. + param.Value = DBNull.Value; + param.GetResolutionInfo(out var secondTypeInfo, out _); + Assert.That(secondTypeInfo, Is.SameAs(typeInfo)); + + // Make sure we don't resolve a different type info either. 
+ param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var thirdTypeInfo, out _); + Assert.That(thirdTypeInfo, Is.SameAs(secondTypeInfo)); + } + + [Test] + public void DBNull_followed_by_non_null_reresolves([Values]bool generic) + { + var param = generic ? new NpgsqlParameter { Value = DBNull.Value } : new NpgsqlParameter { Value = DBNull.Value }; + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var typeInfo, out _); + Assert.That(typeInfo, Is.Not.Null); + + param.Value = "value"; + param.GetResolutionInfo(out var secondTypeInfo, out _); + Assert.That(secondTypeInfo, Is.Null); + + // Make sure we don't resolve the same type info either. + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var thirdTypeInfo, out _); + Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); + } + + [Test] + public void Changing_value_type_reresolves([Values]bool generic) + { + var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var typeInfo, out _); + Assert.That(typeInfo, Is.Not.Null); + + param.Value = 1; + param.GetResolutionInfo(out var secondTypeInfo, out _); + Assert.That(secondTypeInfo, Is.Null); + + // Make sure we don't resolve a different type info either. + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var thirdTypeInfo, out _); + Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); + } + + [Test] + public void DataTypeName_prioritized_over_NpgsqlDbType([Values]bool generic) + { + var param = generic ? 
new NpgsqlParameter + { + NpgsqlDbType = NpgsqlDbType.Integer, + DataTypeName = "text", + Value = "value" + } : new NpgsqlParameter + { + NpgsqlDbType = NpgsqlDbType.Integer, + DataTypeName = "text", + Value = "value" + }; + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); + param.GetResolutionInfo(out var typeInfo, out _); + Assert.That(typeInfo, Is.Not.Null); + Assert.That(typeInfo.PgTypeId, Is.EqualTo(DataSource.CurrentReloadableState.SerializerOptions.TextPgTypeId)); + } + #if NeedsPorting [Test] [Category ("NotWorking")] @@ -796,4 +839,11 @@ public void LocaleId () Assert.AreEqual(15, parameter.LocaleId, "#2"); } #endif + + [OneTimeSetUp] + public async Task Bootstrap() + { + // Bootstrap datasource. + await using (var _ = await OpenConnectionAsync()) {} + } } diff --git a/test/Npgsql.Tests/PgPassEntryTests.cs b/test/Npgsql.Tests/PgPassEntryTests.cs index 53fafd348d..60678e70bf 100644 --- a/test/Npgsql.Tests/PgPassEntryTests.cs +++ b/test/Npgsql.Tests/PgPassEntryTests.cs @@ -1,10 +1,9 @@ -using System; +using System; using NUnit.Framework; using NUnit.Framework.Constraints; namespace Npgsql.Tests; -[TestFixture] public class PgPassEntryTests { [Test] @@ -14,11 +13,11 @@ public void Parses_well_formed_entry() var entry = PgPassFile.Entry.Parse(input); Assert.That(entry, Is.Not.Null); - Assert.That("test", Is.EqualTo(entry.Host)); - Assert.That(1234, Is.EqualTo(entry.Port)); - Assert.That("test2", Is.EqualTo(entry.Database)); - Assert.That("test3", Is.EqualTo(entry.Username)); - Assert.That("test4", Is.EqualTo(entry.Password)); + Assert.That(entry.Host, Is.EqualTo("test")); + Assert.That(entry.Port, Is.EqualTo(1234)); + Assert.That(entry.Database, Is.EqualTo("test2")); + Assert.That(entry.Username, Is.EqualTo("test3")); + Assert.That(entry.Password, Is.EqualTo("test4")); } [Test] @@ -37,11 +36,11 @@ public void Escaped_characters() var entry = PgPassFile.Entry.Parse(input); Assert.That(entry, Is.Not.Null); - Assert.That("t:est", 
Is.EqualTo(entry.Host)); - Assert.That(1234, Is.EqualTo(entry.Port)); - Assert.That("test2", Is.EqualTo(entry.Database)); - Assert.That("test3", Is.EqualTo(entry.Username)); - Assert.That("test\\4", Is.EqualTo(entry.Password)); + Assert.That(entry.Host, Is.EqualTo("t:est")); + Assert.That(entry.Port, Is.EqualTo(1234)); + Assert.That(entry.Database, Is.EqualTo("test2")); + Assert.That(entry.Username, Is.EqualTo("test3")); + Assert.That(entry.Password, Is.EqualTo("test\\4")); } [Test] @@ -98,4 +97,4 @@ public void Match_true_for_null_query() Assert.That(isMatch, Is.True); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/PgPassFileTests.cs b/test/Npgsql.Tests/PgPassFileTests.cs index dd076b5dec..0e7d6f46ce 100644 --- a/test/Npgsql.Tests/PgPassFileTests.cs +++ b/test/Npgsql.Tests/PgPassFileTests.cs @@ -1,10 +1,9 @@ -using System.IO; +using System.IO; using System.Linq; using NUnit.Framework; namespace Npgsql.Tests; -[TestFixture] public class PgPassFileTests { [Test] @@ -51,4 +50,4 @@ public void DeleteTestFile() if (File.Exists(_pgpassFile)) File.Delete(_pgpassFile); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/PoolManagerTests.cs b/test/Npgsql.Tests/PoolManagerTests.cs index 08c38dcb78..25b79d1bbb 100644 --- a/test/Npgsql.Tests/PoolManagerTests.cs +++ b/test/Npgsql.Tests/PoolManagerTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; namespace Npgsql.Tests; @@ -40,7 +40,8 @@ public void Many_pools() [Test] public void ClearAllPools() { - using (OpenConnection()) {} + using (var conn = new NpgsqlConnection(ConnectionString)) + conn.Open(); // Now have one connection in the pool Assert.That(PoolManager.Pools.TryGetValue(ConnectionString, out var pool), Is.True); Assert.That(pool!.Statistics.Idle, Is.EqualTo(1)); @@ -54,9 +55,11 @@ public void ClearAllPools() public void ClearAllPools_with_busy() { NpgsqlDataSource? 
pool; - using (OpenConnection()) + using (var conn = new NpgsqlConnection(ConnectionString)) { - using (OpenConnection()) { } + conn.Open(); + using (var anotherConn = new NpgsqlConnection(ConnectionString)) + anotherConn.Open(); // We have one idle, one busy NpgsqlConnection.ClearAllPools(); diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index 2929884306..65901af14c 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -1,112 +1,88 @@ -using System; -using System.Collections.Generic; +using System; using System.Linq; using System.Net.Sockets; using System.Threading; using System.Threading.Tasks; using NUnit.Framework; -using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; -[NonParallelizable] class PoolTests : TestBase { [Test] - public void MinPoolSize_equals_MaxPoolSize() + public async Task MinPoolSize_equals_MaxPoolSize() { - using var conn = CreateConnection(new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(MinPoolSize_equals_MaxPoolSize), - MinPoolSize = 30, - MaxPoolSize = 30 - }.ToString()); - conn.Open(); + csb.MinPoolSize = 30; + csb.MaxPoolSize = 30; + }); + await using var conn = await dataSource.OpenConnectionAsync(); } [Test] public void MinPoolSize_bigger_than_MaxPoolSize_throws() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + => Assert.ThrowsAsync(async () => { - ApplicationName = nameof(MinPoolSize_bigger_than_MaxPoolSize_throws), - MinPoolSize = 2, - MaxPoolSize = 1 - }.ToString(); - - Assert.Throws(() => CreateConnection(connString)); - } + await using var dataSource = CreateDataSource(csb => + { + csb.MinPoolSize = 2; + csb.MaxPoolSize = 1; + }); + }); [Test] - public void Reuse_connector_before_creating_new() + public async Task Reuse_connector_before_creating_new() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = 
nameof(Reuse_connector_before_creating_new), - }.ToString(); - - using var conn = CreateConnection(connString); - conn.Open(); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); var backendId = conn.Connector!.BackendProcessId; - conn.Close(); - conn.Open(); + await conn.CloseAsync(); + await conn.OpenAsync(); Assert.That(conn.Connector.BackendProcessId, Is.EqualTo(backendId)); } [Test] - public void Get_connector_from_exhausted_pool() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Get_connector_from_exhausted_pool), - MaxPoolSize = 1, - Timeout = 0 - }.ToString(); - - using var conn1 = CreateConnection(connString); - conn1.Open(); - - // Pool is exhausted - using var conn2 = CreateConnection(connString); - _ = Task.Delay(1000).ContinueWith(_ => conn1.Close()); - conn2.Open(); - } - - //[Test, Explicit] - public async Task Get_connector_from_exhausted_pool_async() + public async Task Get_connector_from_exhausted_pool([Values(true, false)] bool async) { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(Get_connector_from_exhausted_pool_async), - MaxPoolSize = 1, - Timeout = 0 - }.ToString(); + csb.MaxPoolSize = 1; + csb.Timeout = 0; + }); - using var conn1 = CreateConnection(connString); - await conn1.OpenAsync(); + await using var conn1 = await dataSource.OpenConnectionAsync(); // Pool is exhausted - using var conn2 = CreateConnection(connString); - using (new Timer(o => conn1.Close(), null, 1000, Timeout.Infinite)) + await using var conn2 = dataSource.CreateConnection(); + _ = Task.Delay(1000).ContinueWith(async _ => + { + if (async) + await conn1.CloseAsync(); + else + conn1.Close(); + }); + if (async) await conn2.OpenAsync(); + else + conn2.Open(); } [Test] public async Task Timeout_getting_connector_from_exhausted_pool([Values(true, 
false)] bool async) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - Timeout = 2 - }; + csb.MaxPoolSize = 1; + csb.Timeout = 2; + }); - using var _ = CreateTempPool(csb, out var connString); - using (var conn1 = CreateConnection(connString)) + await using (var conn1 = dataSource.CreateConnection()) { await conn1.OpenAsync(); // Pool is now exhausted - await using var conn2 = CreateConnection(connString); + await using var conn2 = dataSource.CreateConnection(); var e = async ? Assert.ThrowsAsync(async () => await conn2.OpenAsync())! : Assert.Throws(() => conn2.Open())!; @@ -115,112 +91,69 @@ public async Task Timeout_getting_connector_from_exhausted_pool([Values(true, fa } // conn1 should now be back in the pool as idle - using (var conn3 = CreateConnection(connString)) - conn3.Open(); - } - - [Test] - public async Task Timeout_getting_connector_from_exhausted_pool_async() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Timeout_getting_connector_from_exhausted_pool_async), - MaxPoolSize = 1, - Timeout = 2 - }.ToString(); - - using (var conn1 = CreateConnection(connString)) - { - await conn1.OpenAsync(); - - // Pool is exhausted - using (var conn2 = CreateConnection(connString)) - Assert.That(async () => await conn2.OpenAsync(), Throws.Exception.TypeOf()); - } - // conn1 should now be back in the pool as idle - using (var conn3 = CreateConnection(connString)) - conn3.Open(); + await using var conn3 = await dataSource.OpenConnectionAsync(); } [Test] [Explicit("Timing-based")] public async Task OpenAsync_cancel() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(OpenAsync_cancel), - MaxPoolSize = 1, - }.ToString(); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn1 = await dataSource.OpenConnectionAsync(); - using 
var conn1 = CreateConnection(connString); - await conn1.OpenAsync(); - - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); // Pool is exhausted - using (var conn2 = CreateConnection(connString)) + await using (var conn2 = dataSource.CreateConnection()) { var cts = new CancellationTokenSource(1000); var openTask = conn2.OpenAsync(cts.Token); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); Assert.That(async () => await openTask, Throws.Exception.TypeOf()); } - AssertPoolState(pool, open: 1, idle: 0); - using (var conn2 = CreateConnection(connString)) - using (new Timer(o => conn1.Close(), null, 1000, Timeout.Infinite)) + AssertPoolState(dataSource, open: 1, idle: 0); + await using (var conn2 = dataSource.CreateConnection()) + await using (new Timer(o => conn1.Close(), null, 1000, Timeout.Infinite)) { await conn2.OpenAsync(); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); } - AssertPoolState(pool, open: 1, idle: 1); + AssertPoolState(dataSource, open: 1, idle: 1); } [Test, Description("Makes sure that when a pooled connection is closed it's properly reset, and that parameter settings aren't leaked")] - public void ResetOnClose() + public async Task ResetOnClose() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(ResetOnClose), - SearchPath = "public" - }.ToString(); - using var conn = CreateConnection(connString); - conn.Open(); - Assert.That(conn.ExecuteScalar("SHOW search_path"), Is.Not.Contains("pg_temp")); + await using var dataSource = CreateDataSource(csb => csb.SearchPath = "public"); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(await conn.ExecuteScalarAsync("SHOW search_path"), Is.Not.Contains("pg_temp")); var backendId = conn.Connector!.BackendProcessId; - 
conn.ExecuteNonQuery("SET search_path=pg_temp"); - conn.Close(); + await conn.ExecuteNonQueryAsync("SET search_path=pg_temp"); + await conn.CloseAsync(); - conn.Open(); + await conn.OpenAsync(); Assert.That(conn.Connector.BackendProcessId, Is.EqualTo(backendId)); - Assert.That(conn.ExecuteScalar("SHOW search_path"), Is.EqualTo("public")); + Assert.That(await conn.ExecuteScalarAsync("SHOW search_path"), Is.EqualTo("public")); } [Test] public void ConnectionPruningInterval_zero_throws() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + => Assert.ThrowsAsync(async () => { - ApplicationName = nameof(ConnectionPruningInterval_zero_throws), - ConnectionPruningInterval = 0 - }.ToString(); - - Assert.Throws(() => OpenConnection(connString)); - } + await using var dataSource = CreateDataSource(csb => csb.ConnectionPruningInterval = 0); + }); [Test] public void ConnectionPruningInterval_bigger_than_ConnectionIdleLifetime_throws() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + => Assert.ThrowsAsync(async () => { - ApplicationName = nameof(ConnectionPruningInterval_bigger_than_ConnectionIdleLifetime_throws), - ConnectionIdleLifetime = 1, - ConnectionPruningInterval = 2 - }.ToString(); - - Assert.Throws(() => OpenConnection(connString)); - } + await using var dataSource = CreateDataSource(csb => + { + csb.ConnectionIdleLifetime = 1; + csb.ConnectionPruningInterval = 2; + }); + }); [Theory, Explicit("Slow, and flaky under pressure, based on timing")] [TestCase(0, 2, 1, 2)] // min pool size 0, sample twice @@ -229,26 +162,24 @@ public void ConnectionPruningInterval_bigger_than_ConnectionIdleLifetime_throws( [TestCase(2, 3, 2, 2)] // test rounding up, should sample twice. [TestCase(2, 1, 1, 1)] // test sample once. [TestCase(2, 20, 3, 7)] // test high samples. 
- public void Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, int connectionPruningInterval, int samples) + public async Task Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, int connectionPruningInterval, int samples) { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(Prune_idle_connectors), - MinPoolSize = minPoolSize, - ConnectionIdleLifetime = connectionIdleLifeTime, - ConnectionPruningInterval = connectionPruningInterval - }.ToString(); + csb.MinPoolSize = minPoolSize; + csb.ConnectionIdleLifetime = connectionIdleLifeTime; + csb.ConnectionPruningInterval = connectionPruningInterval; + }); var connectionPruningIntervalMs = connectionPruningInterval * 1000; - using var conn1 = OpenConnection(connString); - using var conn2 = OpenConnection(connString); - using var conn3 = OpenConnection(connString); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); + await using var conn1 = await dataSource.OpenConnectionAsync(); + await using var conn2 = await dataSource.OpenConnectionAsync(); + await using var conn3 = await dataSource.OpenConnectionAsync(); - conn1.Close(); - conn2.Close(); - AssertPoolState(pool!, open: 3, idle: 2); + await conn1.CloseAsync(); + await conn2.CloseAsync(); + AssertPoolState(dataSource!, open: 3, idle: 2); var paddingMs = 100; // 100ms var sleepInterval = connectionPruningIntervalMs + paddingMs; @@ -259,7 +190,7 @@ public void Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, i total += sleepInterval; Thread.Sleep(sleepInterval); // ConnectionIdleLifetime not yet reached. - AssertPoolState(pool, open: 3, idle: 2); + AssertPoolState(dataSource, open: 3, idle: 2); } // final cycle to do pruning. 
@@ -267,65 +198,93 @@ public void Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, i // ConnectionIdleLifetime reached, we still have one connection open minimum, // and as a result we have minPoolSize - 1 idle connections. - AssertPoolState(pool, open: Math.Max(1, minPoolSize), idle: Math.Max(0, minPoolSize - 1)); + AssertPoolState(dataSource, open: Math.Max(1, minPoolSize), idle: Math.Max(0, minPoolSize - 1)); } - [Test, Description("Makes sure that when a waiting async open is is given a connection, the continuation is executed in the TP rather than on the closing thread")] - public void Close_releases_waiter_on_another_thread() + [Test] + [Explicit("Timing-based")] + public async Task Prune_counts_max_lifetime_exceeded() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Close_releases_waiter_on_another_thread), - MaxPoolSize = 1 - }.ToString(); - var conn1 = CreateConnection(connString); - try + await using var dataSource = CreateDataSource(csb => { - conn1.Open(); // Pool is now exhausted + csb.MinPoolSize = 0; + // Idle lifetime 2 seconds, 2 samples + csb.ConnectionIdleLifetime = 2; + csb.ConnectionPruningInterval = 1; + csb.ConnectionLifetime = 5; + }); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 1, idle: 0); + // conn1 will exceed max lifetime + await using var conn1 = await dataSource.OpenConnectionAsync(); - Func> asyncOpener = async () => - { - using (var conn2 = CreateConnection(connString)) - { - await conn2.OpenAsync(); - AssertPoolState(pool, open: 1, idle: 0); - } - AssertPoolState(pool, open: 1, idle: 1); - return Environment.CurrentManagedThreadId; - }; + // make conn1 4 seconds older than the others, so it exceeds max lifetime + Thread.Sleep(4000); - // Start an async open which will not complete as the pool is exhausted. 
- var asyncOpenerTask = asyncOpener(); - conn1.Close(); // Complete the async open by closing conn1 - var asyncOpenerThreadId = asyncOpenerTask.GetAwaiter().GetResult(); - AssertPoolState(pool, open: 1, idle: 1); + await using var conn2 = await dataSource.OpenConnectionAsync(); + await using var conn3 = await dataSource.OpenConnectionAsync(); - Assert.That(asyncOpenerThreadId, Is.Not.EqualTo(Environment.CurrentManagedThreadId)); - } - finally + await conn1.CloseAsync(); + await conn2.CloseAsync(); + AssertPoolState(dataSource, open: 3, idle: 2); + + // wait for 1 sample + Thread.Sleep(1000); + // ConnectionIdleLifetime not yet reached. + AssertPoolState(dataSource, open: 3, idle: 2); + + // close conn3, so we can see if too many connectors get pruned + await conn3.CloseAsync(); + + // wait for last sample + a bit more time for reliability + Thread.Sleep(1500); + + // ConnectionIdleLifetime reached + // - conn1 should have been closed due to max lifetime (but this should count as pruning) + // - conn2 or conn3 should have been closed due to idle pruning + // - conn3 or conn2 should remain + AssertPoolState(dataSource, open: 1, idle: 1); + } + + [Test, Description("Makes sure that when a waiting async open is is given a connection, the continuation is executed in the TP rather than on the closing thread")] + public async Task Close_releases_waiter_on_another_thread() + { + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn1 = await dataSource.OpenConnectionAsync(); // Pool is now exhausted + + AssertPoolState(dataSource, open: 1, idle: 0); + + Func> asyncOpener = async () => { - conn1.Close(); - NpgsqlConnection.ClearPool(conn1); - } + using (var conn2 = dataSource.CreateConnection()) + { + await conn2.OpenAsync(); + AssertPoolState(dataSource, open: 1, idle: 0); + } + AssertPoolState(dataSource, open: 1, idle: 1); + return Environment.CurrentManagedThreadId; + }; + + // Start an async open which will not complete as the 
pool is exhausted. + var asyncOpenerTask = asyncOpener(); + conn1.Close(); // Complete the async open by closing conn1 + var asyncOpenerThreadId = asyncOpenerTask.GetAwaiter().GetResult(); + AssertPoolState(dataSource, open: 1, idle: 1); + + Assert.That(asyncOpenerThreadId, Is.Not.EqualTo(Environment.CurrentManagedThreadId)); } [Test] //TODO: parallelize - public void Release_waiter_on_connection_failure() + public async Task Release_waiter_on_connection_failure() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - Port = 9999, - MaxPoolSize = 1 - }; + csb.Port = 9999; + csb.MaxPoolSize = 1; + }); - using var _ = CreateTempPool(csb, out var connString); var tasks = Enumerable.Range(0, 2).Select(i => Task.Run(async () => { - await using var conn = CreateConnection(connString); - await conn.OpenAsync(); + await using var conn = await dataSource.OpenConnectionAsync(); })).ToArray(); var ex = Assert.Throws(() => Task.WaitAll(tasks))!; @@ -341,19 +300,31 @@ public void ClearPool(int iterations) { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - ApplicationName = nameof(ClearPool) + ApplicationName = nameof(ClearPool) + iterations }.ToString(); - NpgsqlConnection conn; - for (var i = 0; i < iterations; i++) + NpgsqlConnection? 
conn = null; + try { - using (conn = OpenConnection(connString)) { } - // Now have one connection in the pool - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 1, idle: 1); + for (var i = 0; i < iterations; i++) + { + using (conn = new NpgsqlConnection(connString)) + { + conn.Open(); + } - NpgsqlConnection.ClearPool(conn); - AssertPoolState(pool, open: 0, idle: 0); + // Now have one connection in the pool + Assert.That(PoolManager.Pools.TryGetValue(connString, out var pool)); + AssertPoolState(pool, open: 1, idle: 1); + + NpgsqlConnection.ClearPool(conn); + AssertPoolState(pool, open: 0, idle: 0); + } + } + finally + { + if (conn is not null) + NpgsqlConnection.ClearPool(conn); } } @@ -365,16 +336,26 @@ public void ClearPool_with_busy() ApplicationName = nameof(ClearPool_with_busy) }.ToString(); - NpgsqlDataSource? pool; - using (var conn = OpenConnection(connString)) + var conn = new NpgsqlConnection(connString); + try { - NpgsqlConnection.ClearPool(conn); - // conn is still busy but should get closed when returned to the pool + NpgsqlDataSource? 
pool; + using (conn) + { + conn.Open(); + NpgsqlConnection.ClearPool(conn); + // conn is still busy but should get closed when returned to the pool - Assert.True(PoolManager.Pools.TryGetValue(connString, out pool)); - AssertPoolState(pool, open: 1, idle: 0); + Assert.That(PoolManager.Pools.TryGetValue(connString, out pool)); + AssertPoolState(pool, open: 1, idle: 0); + } + + AssertPoolState(pool, open: 0, idle: 0); + } + finally + { + NpgsqlConnection.ClearPool(conn); } - AssertPoolState(pool, open: 0, idle: 0); } [Test] @@ -384,26 +365,24 @@ public void ClearPool_with_no_pool() { ApplicationName = nameof(ClearPool_with_no_pool) }.ToString(); - using var conn = CreateConnection(connString); + using var conn = new NpgsqlConnection(connString); NpgsqlConnection.ClearPool(conn); } [Test, Description("https://github.com/npgsql/npgsql/commit/45e33ecef21f75f51a625c7b919a50da3ed8e920#r28239653")] public void Open_physical_failure() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(Open_physical_failure), - Port = 44444, - MaxPoolSize = 1 - }.ToString(); - using var conn = CreateConnection(connString); + csb.Port = 44444; + csb.MaxPoolSize = 1; + }); + using var conn = dataSource.CreateConnection(); for (var i = 0; i < 1; i++) Assert.That(() => conn.Open(), Throws.Exception .TypeOf() .With.InnerException.TypeOf()); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 0, idle: 0); + AssertPoolState(dataSource, open: 0, idle: 0); } //[Test, Explicit] @@ -411,13 +390,9 @@ public void Open_physical_failure() //[TestCase(10, 10, 30, false)] //[TestCase(10, 20, 30, true)] //[TestCase(10, 20, 30, false)] - public void Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async) + public async Task Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async) { - var connString = new 
NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Exercise_pool), - MaxPoolSize = maxPoolSize - }.ToString(); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = maxPoolSize); Console.WriteLine($"Spinning up {numTasks} parallel tasks for {seconds} seconds (MaxPoolSize={maxPoolSize})..."); StopFlag = 0; @@ -425,7 +400,7 @@ public void Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async { while (StopFlag == 0) { - using var conn = CreateConnection(connString); + await using var conn = dataSource.CreateConnection(); if (async) await conn.OpenAsync(); else @@ -443,14 +418,8 @@ public void Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async [Test] public async Task ConnectionLifetime() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ConnectionLifetime = 1 - }; - - using var _ = CreateTempPool(builder, out var connectionString); - await using var conn = new NpgsqlConnection(connectionString); - await conn.OpenAsync(); + await using var dataSource = CreateDataSource(csb => csb.ConnectionLifetime = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var processId = conn.ProcessID; await conn.CloseAsync(); @@ -477,71 +446,27 @@ void AssertPoolState(NpgsqlDataSource? pool, int open, int idle) // With MaxPoolSize=1, opens many connections in parallel and executes a simple SELECT. 
Since there's only one // physical connection, all operations will be completely serialized [Test] - public Task OnePhysicalConnectionManyCommands() + public async Task OnePhysicalConnectionManyCommands() { const int numParallelCommands = 10000; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - MaxAutoPrepare = 5, - AutoPrepareMinUsages = 5, - Timeout = 0 - }.ToString(); + csb.MaxPoolSize = 1; + csb.MaxAutoPrepare = 5; + csb.AutoPrepareMinUsages = 5; + csb.Timeout = 0; + }); - return Task.WhenAll(Enumerable.Range(0, numParallelCommands) + await Task.WhenAll(Enumerable.Range(0, numParallelCommands) .Select(async i => { - using var conn = new NpgsqlConnection(connString); - await conn.OpenAsync(); - using var cmd = new NpgsqlCommand("SELECT " + i, conn); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT " + i, conn); var result = await cmd.ExecuteScalarAsync(); Assert.That(result, Is.EqualTo(i)); })); } - // When multiplexing, and the pool is totally saturated (at Max Pool Size and 0 idle connectors), we select - // the connector with the least commands in flight and execute on it. We must never select a connector with - // a pending transaction on it. - // TODO: Test not tested - [Test] - [Ignore("Multiplexing: fails")] - public void MultiplexedCommandDoesntGetExecutedOnTransactionedConnector() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1, - Timeout = 1 - }.ToString(); - - using var connWithTx = OpenConnection(connString); - using var tx = connWithTx.BeginTransaction(); - // connWithTx should now be bound with the only physical connector available. 
- // Any commands execute should timeout - - using var conn2 = OpenConnection(connString); - using var cmd = new NpgsqlCommand("SELECT 1", conn2); - Assert.ThrowsAsync(() => cmd.ExecuteScalarAsync()); - } - - protected override NpgsqlConnection CreateConnection(string? connectionString = null) - { - var conn = base.CreateConnection(connectionString); - _cleanup.Add(conn); - return conn; - } - - readonly List _cleanup = new(); - - [TearDown] - public void Cleanup() - { - foreach (var c in _cleanup) - { - NpgsqlConnection.ClearPool(c); - } - _cleanup.Clear(); - } - #endregion } diff --git a/test/Npgsql.Tests/PostgresTypeTests.cs b/test/Npgsql.Tests/PostgresTypeTests.cs index 644d839697..7c3945fb0a 100644 --- a/test/Npgsql.Tests/PostgresTypeTests.cs +++ b/test/Npgsql.Tests/PostgresTypeTests.cs @@ -1,7 +1,6 @@ using System.Linq; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.TypeMapping; using NUnit.Framework; namespace Npgsql.Tests; @@ -70,6 +69,6 @@ public async Task Multirange() async Task GetDatabaseInfo() { await using var conn = await OpenConnectionAsync(); - return conn.NpgsqlDataSource.TypeMapper.DatabaseInfo; + return conn.NpgsqlDataSource.CurrentReloadableState.DatabaseInfo; } } diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index 12f8d2e3b5..d09d3ac016 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -1,10 +1,13 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; +using Npgsql.BackendMessages; +using Npgsql.Internal.Postgres; +using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -13,6 +16,8 @@ namespace Npgsql.Tests; public class PrepareTests: TestBase { + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + [Test] public void 
Basic() { @@ -216,11 +221,8 @@ public void Double_prepare_different_sql() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/395")] public void Across_close_open_same_connector() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(PrepareTests) + '.' + nameof(Across_close_open_same_connector) - }; - using var conn = OpenConnectionAndUnprepare(csb); + using var dataSource = CreateDataSource(); + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn); cmd.Prepare(); Assert.That(cmd.IsPrepared, Is.True); @@ -232,18 +234,14 @@ public void Across_close_open_same_connector() Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); cmd.Prepare(); Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); - NpgsqlConnection.ClearPool(conn); } [Test] public void Across_close_open_different_connector() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(PrepareTests) + '.' + nameof(Across_close_open_different_connector) - }.ToString(); - using var conn1 = new NpgsqlConnection(connString); - using var conn2 = new NpgsqlConnection(connString); + using var dataSource = CreateDataSource(); + using var conn1 = dataSource.CreateConnection(); + using var conn2 = dataSource.CreateConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); conn1.Open(); cmd.Prepare(); @@ -257,18 +255,14 @@ public void Across_close_open_different_connector() Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); // Execute unprepared cmd.Prepare(); Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); - NpgsqlConnection.ClearPool(conn1); } [Test] public void Reuse_prepared_statement() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(PrepareTests) + '.' 
+ nameof(Reuse_prepared_statement) - }.ToString(); - using var conn1 = OpenConnection(connString); - var preparedStatement = ""; + using var dataSource = CreateDataSource(); + using var conn1 = dataSource.OpenConnection(); + var preparedStatement = Array.Empty(); using (var cmd1 = new NpgsqlCommand("SELECT @p", conn1)) { cmd1.Parameters.AddWithValue("p", 8); @@ -286,7 +280,6 @@ public void Reuse_prepared_statement() Assert.That(cmd2.InternalBatchCommands[0].PreparedStatement!.Name, Is.EqualTo(preparedStatement)); Assert.That(cmd2.ExecuteScalar(), Is.EqualTo(8)); } - NpgsqlConnection.ClearPool(conn1); } [Test] @@ -388,12 +381,12 @@ public void One_command_same_sql_twice() [Test] public void One_command_same_sql_auto_prepare() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 5, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnectionAndUnprepare(csb); + csb.MaxAutoPrepare = 5; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); var sql = new StringBuilder(); for (var i = 0; i < 2 + 1; i++) sql.Append("SELECT 1;"); @@ -469,7 +462,7 @@ public void Overloaded_sql() // SQL overloading is a pretty rare/exotic scenario. Handling it properly would involve keying // prepared statements not just by SQL but also by the parameter types, which would pointlessly - // increase allocations. Instead, the second execution simply reuns unprepared + // increase allocations. Instead, the second execution simply reruns unprepared AssertNumPreparedStatements(conn, 1); conn.UnprepareAll(); } @@ -535,12 +528,8 @@ public void Persistent_across_commands() [Test, Description("Basic persistent prepared system scenario. 
Checks that statement is not deallocated in the backend after connection close.")] public void Persistent_across_connections() { - var connSettings = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Persistent_across_connections) - }; - - using var conn = OpenConnectionAndUnprepare(connSettings); + using var dataSource = CreateDataSource(); + using var conn = dataSource.OpenConnection(); var processId = conn.ProcessID; AssertNumPreparedStatements(conn, 0); @@ -564,8 +553,6 @@ public void Persistent_across_connections() } AssertNumPreparedStatements(conn, 1, "Prepared statement deallocated"); Assert.That(GetPreparedStatements(conn).Single(), Is.EqualTo(stmtName), "Prepared statement name changed unexpectedly"); - - NpgsqlConnection.ClearPool(conn); } [Test, Description("Makes sure that calling Prepare() twice on a command does not deallocate or make a new one after the first prepared statement when command does not change")] @@ -672,7 +659,7 @@ public void Same_sql_different_params() using (var conn = OpenConnectionAndUnprepare()) using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { - throw new NotImplementedException("Problem: currentl setting NpgsqlParameter.Value clears/invalidates..."); + throw new NotImplementedException("Problem: current setting NpgsqlParameter.Value clears/invalidates..."); cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Integer)); cmd.Prepare(true); @@ -739,26 +726,14 @@ public void Prepare_multiple_commands_with_parameters() } [Test] - public void Multiplexing_not_supported() - { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { Multiplexing = true }; - using var conn = OpenConnection(builder); - using var cmd = new NpgsqlCommand("SELECT 1", conn); - - Assert.That(() => cmd.Prepare(), Throws.Exception.TypeOf()); - Assert.That(() => conn.UnprepareAll(), Throws.Exception.TypeOf()); - } - - [Test] - public async Task Explicitly_prepared_statement_invalidation() + public async 
Task Explicitly_prepared_statement_invalidation([Values] bool prepareAfterError, [Values] bool unprepareAfterError) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - await using var connection = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var connection = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(connection, "foo int"); await using var command = new NpgsqlCommand($"SELECT * FROM {table}", connection); @@ -769,24 +744,173 @@ public async Task Explicitly_prepared_statement_invalidation() // Since we've changed the table schema, the next execution of the prepared statement will error with 0A000 var exception = Assert.ThrowsAsync(() => command.ExecuteNonQueryAsync())!; Assert.That(exception.SqlState, Is.EqualTo(PostgresErrorCodes.FeatureNotSupported)); // cached plan must not change result type + Assert.That(command.IsPrepared, Is.False); + + if (unprepareAfterError) + { + // Just check that calling unprepare after error doesn't break anything + await command.UnprepareAsync(); + Assert.That(command.IsPrepared, Is.False); + } + + if (prepareAfterError) + { + // If we explicitly prepare after error, we should replace the previous prepared statement with a new one + await command.PrepareAsync(); + Assert.That(command.IsPrepared); + } // However, Npgsql should invalidate the prepared statement in this case, so the next execution should work Assert.DoesNotThrowAsync(() => command.ExecuteNonQueryAsync()); - // The command is unprepared, though. It's the user's responsibility to re-prepare if they wish. - Assert.False(command.IsPrepared); + if (!prepareAfterError) + { + // The command is unprepared, though. It's the user's responsibility to re-prepare if they wish. 
+ Assert.That(command.IsPrepared, Is.False); + } } - NpgsqlConnection OpenConnectionAndUnprepare(string? connectionString = null) + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4920")] + public async Task Explicit_prepare_unprepare_many_queries() { - var conn = OpenConnection(connectionString); + // Set a specific buffer's size to trigger #4920 + await using var dataSource = CreateDataSource(csb => csb.WriteBufferSize = 5002); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + cmd.CommandText = string.Join(';', Enumerable.Range(1, 500).Select(x => $"SELECT {x}")); + await cmd.PrepareAsync(); + await cmd.UnprepareAsync(); + } + + [Test] + public async Task Explicitly_prepared_batch_sends_prepared_queries() + { + await using var postmaster = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + + await using var conn = await dataSource.OpenConnectionAsync(); + var server = await postmaster.WaitForServerConnection(); + + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = { new("SELECT 1"), new("SELECT 2") } + }; + + var prepareTask = batch.PrepareAsync(); + + await server.ExpectMessages( + FrontendMessageCode.Parse, FrontendMessageCode.Describe, + FrontendMessageCode.Parse, FrontendMessageCode.Describe, + FrontendMessageCode.Sync); + + await server + .WriteParseComplete() + .WriteParameterDescription(new FieldDescription(Int4Oid)) + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteParseComplete() + .WriteParameterDescription(new FieldDescription(Int4Oid)) + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteReadyForQuery() + .FlushAsync(); + + await prepareTask; + + for (var i = 0; i < 2; i++) + await ExecutePreparedBatch(batch, server); + + async Task ExecutePreparedBatch(NpgsqlBatch batch, PgServerMock server) + { + var executeBatchTask = batch.ExecuteNonQueryAsync(); + + await 
server.ExpectMessages( + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Sync); + + await server + .WriteBindComplete() + .WriteCommandComplete() + .WriteBindComplete() + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await executeBatchTask; + } + } + + [Test] + public async Task Auto_prepared_batch_sends_prepared_queries() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + AutoPrepareMinUsages = 1, + MaxAutoPrepare = 10 + }; + await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + + await using var conn = await dataSource.OpenConnectionAsync(); + var server = await postmaster.WaitForServerConnection(); + + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = { new("SELECT 1"), new("SELECT 2") } + }; + + var firstBatchExecuteTask = batch.ExecuteNonQueryAsync(); + + await server.ExpectMessages( + FrontendMessageCode.Parse, FrontendMessageCode.Bind, FrontendMessageCode.Describe, FrontendMessageCode.Execute, + FrontendMessageCode.Parse, FrontendMessageCode.Bind, FrontendMessageCode.Describe, FrontendMessageCode.Execute, + FrontendMessageCode.Sync); + + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteCommandComplete() + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await firstBatchExecuteTask; + + for (var i = 0; i < 2; i++) + await ExecutePreparedBatch(batch, server); + + async Task ExecutePreparedBatch(NpgsqlBatch batch, PgServerMock server) + { + var executeBatchTask = batch.ExecuteNonQueryAsync(); + + await server.ExpectMessages( + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Bind, 
FrontendMessageCode.Execute, + FrontendMessageCode.Sync); + + await server + .WriteBindComplete() + .WriteCommandComplete() + .WriteBindComplete() + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await executeBatchTask; + } + } + + NpgsqlConnection OpenConnectionAndUnprepare() + { + var conn = OpenConnection(); conn.UnprepareAll(); return conn; } - NpgsqlConnection OpenConnectionAndUnprepare(NpgsqlConnectionStringBuilder csb) - => OpenConnectionAndUnprepare(csb.ToString()); - void AssertNumPreparedStatements(NpgsqlConnection conn, int expected) => Assert.That(conn.ExecuteScalar("SELECT COUNT(*) FROM pg_prepared_statements WHERE statement NOT LIKE '%FROM pg_prepared_statements%'"), Is.EqualTo(expected)); diff --git a/test/Npgsql.Tests/Properties/AssemblyInfo.cs b/test/Npgsql.Tests/Properties/AssemblyInfo.cs index f7cdcd188d..9b9dcef56f 100644 --- a/test/Npgsql.Tests/Properties/AssemblyInfo.cs +++ b/test/Npgsql.Tests/Properties/AssemblyInfo.cs @@ -1,7 +1,7 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; using NUnit.Framework; -[assembly: Parallelizable(ParallelScope.Children), Timeout(30000)] +[assembly: Parallelizable(ParallelScope.Children)] [assembly: InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index 2d76a39bf8..172e0d5af5 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -1,5 +1,4 @@ -using Npgsql.Internal; -using Npgsql.Util; +using Npgsql.Internal; using NUnit.Framework; using System; using System.IO; @@ -8,7 +7,7 @@ namespace Npgsql.Tests; -[NonParallelizable] // Parallel access to a single buffer +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] // Parallel access to a single buffer class ReadBufferTests { [Test] @@ -56,9 +55,9 @@ public void ReadDouble() public void 
ReadNullTerminatedString_buffered_only() { Writer - .Write(PGUtil.UTF8Encoding.GetBytes(new string("foo"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("foo"))) .WriteByte(0) - .Write(PGUtil.UTF8Encoding.GetBytes(new string("bar"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("bar"))) .WriteByte(0); ReadBuffer.Ensure(1); @@ -70,30 +69,27 @@ public void ReadNullTerminatedString_buffered_only() [Test] public async Task ReadNullTerminatedString_with_io() { - Writer.Write(PGUtil.UTF8Encoding.GetBytes(new string("Chunked "))); - ReadBuffer.Ensure(1); + Writer.Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("Chunked "))); + await ReadBuffer.Ensure(1, async: true); var task = ReadBuffer.ReadNullTerminatedString(async: true); Assert.That(!task.IsCompleted); Writer - .Write(PGUtil.UTF8Encoding.GetBytes(new string("string"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("string"))) .WriteByte(0) - .Write(PGUtil.UTF8Encoding.GetBytes(new string("bar"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("bar"))) .WriteByte(0); - Assert.That(task.IsCompleted); Assert.That(await task, Is.EqualTo("Chunked string")); Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("bar")); } -#pragma warning disable CS8625 [SetUp] public void SetUp() { var stream = new MockStream(); - ReadBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, PGUtil.UTF8Encoding, PGUtil.RelaxedUTF8Encoding); + ReadBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); Writer = stream.Writer; } -#pragma warning restore CS8625 // ReSharper disable once InconsistentNaming NpgsqlReadBuffer ReadBuffer = default!; @@ -136,12 +132,8 @@ async Task Read(byte[] buffer, int offset, int count, bool async) return count; } - internal class MockStreamWriter + internal class MockStreamWriter(MockStream stream) { - readonly 
MockStream _stream; - - public MockStreamWriter(MockStream stream) => _stream = stream; - public MockStreamWriter WriteByte(byte b) { Span bytes = stackalloc byte[1]; @@ -152,11 +144,11 @@ public MockStreamWriter WriteByte(byte b) public MockStreamWriter Write(ReadOnlySpan bytes) { - if (_stream._filled + bytes.Length > Size) + if (stream._filled + bytes.Length > Size) throw new Exception("Mock stream overrun"); - bytes.CopyTo(new Span(_stream._data, _stream._filled, bytes.Length)); - _stream._filled += bytes.Length; - _stream._tcs.TrySetResult(new()); + bytes.CopyTo(new Span(stream._data, stream._filled, bytes.Length)); + stream._filled += bytes.Length; + stream._tcs.TrySetResult(new()); return this; } } diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index d70f772e37..f7e3747489 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -1,7 +1,10 @@ -using System; +using System; +using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; +using System.Data.Common; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Npgsql.PostgresTypes; using NUnit.Framework; @@ -14,7 +17,7 @@ namespace Npgsql.Tests; /// Note that this API is also available on .NET Framework. /// For the old DataTable-based API, see . 
/// -public class ReaderNewSchemaTests : SyncOrAsyncTestBase +public class ReaderNewSchemaTests(SyncOrAsync syncOrAsync) : SyncOrAsyncTestBase(syncOrAsync) { // ReSharper disable once InconsistentNaming [Test] @@ -204,7 +207,7 @@ public async Task ColumnAttributeNumber() public async Task ColumnSize() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Column size is never unlimited on Redshift"); + await IgnoreOnRedshift(conn, "Column size is never unlimited on Redshift"); var table = await CreateTempTable(conn, "bounded VARCHAR(30), unbounded VARCHAR"); using var cmd = new NpgsqlCommand($"SELECT bounded,unbounded,'a'::VARCHAR(10),'b'::VARCHAR FROM {table}", conn); @@ -220,7 +223,7 @@ public async Task ColumnSize() public async Task IsAutoIncrement() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Serial columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Serial columns not support on Redshift"); var table = await CreateTempTable(conn, "serial SERIAL, int INT"); @@ -236,7 +239,7 @@ public async Task IsAutoIncrement() public async Task IsAutoIncrement_identity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = @@ -249,11 +252,41 @@ public async Task IsAutoIncrement_identity() Assert.That(columns[1].IsAutoIncrement, Is.True, "PG 10 IDENTITY not identified as autoincrement"); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5491")] + public async Task Partitioned_table() + { + await using var conn = await OpenConnectionAsync(); + await IgnoreOnRedshift(conn); + MinimumPgVersion(conn, "10.0", "Partitioned tables introduced in PostgreSQL 10"); + + var table = await GetTempTableName(conn); + await conn.ExecuteNonQueryAsync( + $""" + CREATE TABLE {table} ( + 
id INTEGER GENERATED BY DEFAULT AS IDENTITY NOT NULL, + event_date TIMESTAMP WITHOUT TIME ZONE NOT NULL, + user_name CHARACTER VARYING(500), + CONSTRAINT pk_{table} PRIMARY KEY (id, event_date) + ) PARTITION BY RANGE (event_date) + """); + + await using var cmd = new NpgsqlCommand($"SELECT id, event_date, user_name FROM {table}", conn); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); + var columns = await GetColumnSchema(reader); + + Assert.That(columns[0].AllowDBNull, Is.False); + Assert.That(columns[0].IsAutoIncrement, Is.True); + Assert.That(columns[1].AllowDBNull, Is.False); + Assert.That(columns[1].IsAutoIncrement, Is.False); + Assert.That(columns[2].AllowDBNull, Is.True); + Assert.That(columns[2].IsAutoIncrement, Is.False); + } + [Test] public async Task IsIdentity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = await CreateTempTable( conn, @@ -273,7 +306,7 @@ public async Task IsIdentity() public async Task IsKey() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); + await IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); var table = await CreateTempTable(conn, "id INT PRIMARY KEY, non_id INT, uniq INT UNIQUE"); using var cmd = new NpgsqlCommand($"SELECT id,non_id,uniq,8 FROM {table}", conn); @@ -294,7 +327,7 @@ public async Task IsKey() public async Task IsKey_composite() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); + await IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); var table = await CreateTempTable(conn, "id1 INT, id2 INT, PRIMARY KEY (id1, id2)"); using var 
cmd = new NpgsqlCommand($"SELECT id1,id2 FROM {table}", conn); @@ -308,7 +341,7 @@ public async Task IsKey_composite() public async Task IsLong() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "bytea not supported on Redshift"); + await IgnoreOnRedshift(conn, "bytea not supported on Redshift"); var table = await CreateTempTable(conn, "long BYTEA, non_long INT"); using var cmd = new NpgsqlCommand($"SELECT long, non_long, 8 FROM {table}", conn); @@ -351,7 +384,7 @@ public async Task IsReadOnly_on_non_column() public async Task IsUnique() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Unique not supported in reader schema on Redshift"); + await IgnoreOnRedshift(conn, "Unique not supported in reader schema on Redshift"); var table = await GetTempTableName(conn); await conn.ExecuteNonQueryAsync($@" @@ -373,7 +406,7 @@ await conn.ExecuteNonQueryAsync($@" public async Task NumericPrecision() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Precision is never unlimited on Redshift"); + await IgnoreOnRedshift(conn, "Precision is never unlimited on Redshift"); var table = await CreateTempTable(conn, "a NUMERIC(8), b NUMERIC, c INTEGER"); using var cmd = new NpgsqlCommand($"SELECT a,b,c,8.3::NUMERIC(8) FROM {table}", conn); @@ -389,7 +422,7 @@ public async Task NumericPrecision() public async Task NumericScale() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Scale is never unlimited on Redshift"); + await IgnoreOnRedshift(conn, "Scale is never unlimited on Redshift"); var table = await CreateTempTable(conn, "a NUMERIC(8,5), b NUMERIC, c INTEGER"); using var cmd = new NpgsqlCommand($"SELECT a,b,c,8.3::NUMERIC(8,5) FROM {table}", conn); @@ -431,7 +464,7 @@ public async Task DataType_unknown_type() public async Task DataType_with_composite() { await using var adminConnection = await OpenConnectionAsync(); - IgnoreOnRedshift(adminConnection, "Composite types not support on 
Redshift"); + await IgnoreOnRedshift(adminConnection, "Composite types not support on Redshift"); var type = await GetTempTypeName(adminConnection); await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (foo int)"); var tableName = await CreateTempTable(adminConnection, $"comp {type}"); @@ -450,6 +483,19 @@ public async Task DataType_with_composite() Assert.That(columns[1].UdtAssemblyQualifiedName, Is.EqualTo(typeof(SomeComposite).AssemblyQualifiedName)); } + [Test] + public async Task DataType_with_array() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INTEGER[]"); + + using var cmd = new NpgsqlCommand($"SELECT foo, ARRAY[1::INTEGER, 2::INTEGER] FROM {table}", conn); + using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); + var columns = await GetColumnSchema(reader); + Assert.That(columns[0].DataType, Is.SameAs(typeof(Array))); + Assert.That(columns[1].DataType, Is.SameAs(typeof(Array))); + } + [Test] public async Task UdtAssemblyQualifiedName() { @@ -654,10 +700,8 @@ await conn.ExecuteNonQueryAsync($@" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1553")] public async Task Domain_type() { - // if (IsMultiplexing) - // Assert.Ignore("Multiplexing: ReloadTypes"); using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Domain types not support on Redshift"); + await IgnoreOnRedshift(conn, "Domain types not support on Redshift"); const string domainTypeName = "my_domain"; var schema = await CreateTempSchema(conn); @@ -673,6 +717,8 @@ public async Task Domain_type() var pgType = domainSchema.PostgresType; Assert.That(pgType, Is.InstanceOf()); Assert.That(((PostgresDomainType)pgType).BaseType.Name, Is.EqualTo("character varying")); + // For domains we should return the underlying type + Assert.That(domainSchema.NpgsqlDbType, Is.EqualTo(NpgsqlTypes.NpgsqlDbType.Varchar)); } [Test] @@ -743,6 +789,25 @@ public async Task With_parameter_without_value() 
Assert.That(columns[0].ColumnName, Is.EqualTo("foo")); } + [Test] + public async Task GetColumnSchema_via_interface() + { + await using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INTEGER"); + + using var cmd = new NpgsqlCommand($"SELECT foo FROM {table} WHERE foo > @p", conn) + { + Parameters = { new() { ParameterName = "p", NpgsqlDbType = NpgsqlTypes.NpgsqlDbType.Integer } } + }; + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); + + var iface = (IDbColumnSchemaGenerator)reader; + var schema = iface.GetColumnSchema(); + Assert.That(schema, Is.Not.Null); + Assert.That(schema.Count, Is.EqualTo(1)); + Assert.That(schema[0], Is.Not.Null); + } + #region Not supported [Test] @@ -774,8 +839,6 @@ class SomeComposite public int Foo { get; set; } } - public ReaderNewSchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } - - async Task> GetColumnSchema(NpgsqlDataReader reader) - => IsAsync ? await reader.GetColumnSchemaAsync() : reader.GetColumnSchema(); + async Task> GetColumnSchema(NpgsqlDataReader reader) + => IsAsync ? (await reader.GetColumnSchemaAsync(CancellationToken.None)).Cast().ToArray() : reader.GetColumnSchema(); } diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index 92e3cf2e6d..ebb1d883fd 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Linq; using System.Threading.Tasks; @@ -11,7 +11,7 @@ namespace Npgsql.Tests; /// This tests the .NET Framework DbDataReader schema/metadata API, which returns DataTable. /// For the new CoreCLR API, see . 
/// -public class ReaderOldSchemaTests : SyncOrAsyncTestBase +public class ReaderOldSchemaTests(SyncOrAsync syncOrAsync) : SyncOrAsyncTestBase(syncOrAsync) { [Test] public async Task Primary_key_composite() @@ -55,7 +55,7 @@ public async Task Primary_key() public async Task IsAutoIncrement() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); + await IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); var table = await CreateTempTable(conn, "serial SERIAL, int INT"); @@ -72,7 +72,7 @@ public async Task IsAutoIncrement() public async Task IsAutoIncrement_identity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); + await IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = @@ -90,7 +90,7 @@ public async Task IsAutoIncrement_identity() public async Task IsIdentity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = await CreateTempTable( conn, @@ -118,32 +118,18 @@ await conn.ExecuteNonQueryAsync($@" CREATE TABLE {table} (id SERIAL PRIMARY KEY, int2 SMALLINT); CREATE OR REPLACE VIEW {view} (id, int2) AS SELECT id, int2 + int2 AS int2 FROM {table}"); - var command = new NpgsqlCommand($"SELECT * FROM {view}", conn); + var command = new NpgsqlCommand($"SELECT id, int2 FROM {view}", conn); - using var dr = command.ExecuteReader(); + using var dr = command.ExecuteReader(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); var metadata = await GetSchemaTable(dr); - foreach (var r in metadata!.Rows.OfType()) - { - switch ((string)r["ColumnName"]) - { - case "field_pk": - if 
(conn.PostgreSqlVersion < new Version("9.4")) - { - // 9.3 and earlier: IsUpdatable = False - Assert.IsTrue((bool)r["IsReadonly"], "field_pk"); - } - else - { - // 9.4: IsUpdatable = True - Assert.IsFalse((bool)r["IsReadonly"], "field_pk"); - } - break; - case "field_int2": - Assert.IsTrue((bool)r["IsReadonly"]); - break; - } - } + var idRow = metadata!.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "id"); + Assert.That(idRow, Is.Not.Null, "Unable to find metadata for id column"); + var int2Row = metadata.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "int2"); + Assert.That(int2Row, Is.Not.Null, "Unable to find metadata for int2 column"); + + Assert.That((bool)idRow!["IsReadonly"], Is.False); + Assert.That((bool)int2Row!["IsReadonly"]); } // ReSharper disable once InconsistentNaming @@ -156,19 +142,14 @@ public async Task AllowDBNull() using var cmd = new NpgsqlCommand($"SELECT * FROM {table}", conn); using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); using var metadata = await GetSchemaTable(reader); - foreach (var row in metadata!.Rows.OfType()) - { - var isNullable = (bool)row["AllowDBNull"]; - switch ((string)row["ColumnName"]) - { - case "nullable": - Assert.IsTrue(isNullable); - continue; - case "non_nullable": - Assert.IsFalse(isNullable); - continue; - } - } + + var nullableRow = metadata!.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "nullable"); + Assert.That(nullableRow, Is.Not.Null, "Unable to find metadata for nullable column"); + var nonNullableRow = metadata.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "non_nullable"); + Assert.That(nonNullableRow, Is.Not.Null, "Unable to find metadata for non_nullable column"); + + Assert.That((bool)nullableRow!["AllowDBNull"]); + Assert.That((bool)nonNullableRow!["AllowDBNull"], Is.False); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1027")] @@ -200,9 +181,6 @@ public async Task 
Precision_and_scale() [Test] public async Task SchemaOnly([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - // if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - // return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -259,7 +237,5 @@ CONSTRAINT PK_test_Cod PRIMARY KEY (Cod) Assert.That(dt.Rows[2]["ColumnName"].ToString(), Is.EqualTo("date")); } - public ReaderOldSchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } - async Task GetSchemaTable(NpgsqlDataReader dr) => IsAsync ? await dr.GetSchemaTableAsync() : dr.GetSchemaTable(); } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 5dc6bd6534..9ec1d056cd 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers.Binary; using System.Collections; using System.Data; @@ -10,23 +10,39 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Tests.Support; -using Npgsql.TypeMapping; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; +using NUnit.Framework.Constraints; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.SequentialAccess)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] -public class ReaderTests : MultiplexingTestBase +[TestFixture(CommandBehavior.Default)] +[TestFixture(CommandBehavior.SequentialAccess)] +public class ReaderTests : TestBase { + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + static uint ByteaOid => 
PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bytea).Value; + + [Test] + public async Task Resumable_non_consumed_to_non_resumable() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( "SELECT 'aaaaaaaa', 1", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + await reader.IsDBNullAsync(0); // resumable, no consumption + _ = reader.IsDBNull(0); // resumable, no consumption + await using var stream = await reader.GetStreamAsync(0); // non-resumable + if (IsSequential) + Assert.That(() => reader.GetString(0), Throws.Exception.TypeOf()); + } + [Test] public async Task Seek_columns() { @@ -220,7 +236,7 @@ public async Task Get_string_with_parameter() using var dr = await command.ExecuteReaderAsync(Behavior); dr.Read(); var result = dr.GetString(0); - Assert.AreEqual(text, result); + Assert.That(result, Is.EqualTo(text)); } [Test] @@ -246,7 +262,7 @@ await conn.ExecuteNonQueryAsync($@" using var dr = await command.ExecuteReaderAsync(Behavior); dr.Read(); var result = dr.GetString(0); - Assert.AreEqual(test, result); + Assert.That(result, Is.EqualTo(test)); } [Test] @@ -287,7 +303,7 @@ public async Task GetFieldType_SchemaOnly() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT 1::INT4 AS some_column", conn); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); + await using var reader = await cmd.ExecuteReaderAsync(Behavior | CommandBehavior.SchemaOnly); reader.Read(); Assert.That(reader.GetFieldType(0), Is.SameAs(typeof(int))); } @@ -295,9 +311,6 @@ public async Task GetFieldType_SchemaOnly() [Test] public async Task GetPostgresType() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: Fails"); - using var conn = await OpenConnectionAsync(); PostgresType intType; using (var cmd = new NpgsqlCommand(@"SELECT 1::INTEGER AS some_column", conn)) @@ -365,14 +378,10 
@@ public async Task GetDataTypeName(string typeName, string? normalizedName = null [Test] public async Task GetDataTypeName_enum() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1 - }; - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS ENUM ('one')"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug conn.ReloadTypes(); await using var cmd = new NpgsqlCommand($"SELECT 'one'::{typeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(Behavior); @@ -383,14 +392,10 @@ public async Task GetDataTypeName_enum() [Test] public async Task GetDataTypeName_domain() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1 - }; - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE DOMAIN {typeName} AS VARCHAR(10)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug conn.ReloadTypes(); await using var cmd = new NpgsqlCommand($"SELECT 'one'::{typeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(Behavior); @@ -464,7 +469,7 @@ public async Task GetValues() dr.Read(); var values = new object[4]; Assert.That(dr.GetValues(values), Is.EqualTo(3)); - Assert.That(values, Is.EqualTo(new object?[] { "hello", 1, new DateTime(2014, 1, 1), null })); + Assert.That(values, Is.EqualTo(new object?[] { "hello", 1, new DateOnly(2014, 1, 1), null })); } using (var dr = await command.ExecuteReaderAsync(Behavior)) { @@ -485,7 +490,7 @@ public async Task 
ExecuteReader_getting_empty_resultset_with_output_parameter() param.Direction = ParameterDirection.Output; command.Parameters.Add(param); using var dr = await command.ExecuteReaderAsync(Behavior); - Assert.IsFalse(dr.NextResult()); + Assert.That(dr.NextResult(), Is.False); } [Test] @@ -524,14 +529,14 @@ public async Task Read_past_reader_end() [Test] public async Task Reader_dispose_state_does_not_leak() { - if (IsMultiplexing || Behavior != CommandBehavior.Default) + if (Behavior != CommandBehavior.Default) return; var startReaderClosedTcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); var continueReaderClosedTcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); - using var _ = CreateTempPool(ConnectionString, out var connectionString); - await using var conn1 = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + await using var conn1 = await dataSource.OpenConnectionAsync(); var connID = conn1.Connector!.Id; var readerCloseTask = Task.Run(async () => { @@ -546,7 +551,7 @@ public async Task Reader_dispose_state_does_not_leak() }); await startReaderClosedTcs.Task; - await using var conn2 = await OpenConnectionAsync(connectionString); + await using var conn2 = await dataSource.OpenConnectionAsync(); Assert.That(conn2.Connector!.Id, Is.EqualTo(connID)); using var cmd = conn2.CreateCommand(); cmd.CommandText = "SELECT 1"; @@ -571,9 +576,6 @@ public async Task SingleResult() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/400")] public async Task Exception_thrown_from_ExecuteReaderAsync([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var function = await GetTempFunctionName(conn); @@ -592,9 +594,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1032")] 
public async Task Exception_thrown_from_NextResult([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var function = await GetTempFunctionName(conn); @@ -612,9 +611,10 @@ await conn.ExecuteNonQueryAsync($@" } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] - public async Task NpgsqlException_references_BatchCommand_with_single_command() + public async Task NpgsqlException_references_BatchCommand_with_single_command([Values] bool includeFailedBatchedCommand) { - await using var conn = await OpenConnectionAsync(); + await using var dataSource = CreateDataSource(x => x.IncludeFailedBatchedCommand = includeFailedBatchedCommand); + await using var conn = await dataSource.OpenConnectionAsync(); var function = await GetTempFunctionName(conn); await conn.ExecuteNonQueryAsync($@" @@ -627,19 +627,23 @@ await conn.ExecuteNonQueryAsync($@" cmd.CommandText = $"SELECT {function}()"; var exception = Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync(Behavior))!; - Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[0])); + if (includeFailedBatchedCommand) + Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[0])); + else + Assert.That(exception.BatchCommand, Is.Null); // Make sure the command isn't recycled by the connection when it's disposed - this is important since internal command // resources are referenced by the exception above, which is very likely to escape the using statement of the command. 
cmd.Dispose(); var cmd2 = conn.CreateCommand(); - Assert.AreNotSame(cmd2, cmd); + Assert.That(cmd, Is.Not.SameAs(cmd2)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] - public async Task NpgsqlException_references_BatchCommand_with_multiple_commands() + public async Task NpgsqlException_references_BatchCommand_with_multiple_commands([Values] bool includeFailedBatchedCommand) { - await using var conn = await OpenConnectionAsync(); + await using var dataSource = CreateDataSource(x => x.IncludeFailedBatchedCommand = includeFailedBatchedCommand); + await using var conn = await dataSource.OpenConnectionAsync(); var function = await GetTempFunctionName(conn); await conn.ExecuteNonQueryAsync($@" @@ -654,14 +658,17 @@ await conn.ExecuteNonQueryAsync($@" await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { var exception = Assert.ThrowsAsync(() => reader.NextResultAsync())!; - Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[1])); + if (includeFailedBatchedCommand) + Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[1])); + else + Assert.That(exception.BatchCommand, Is.Null); } // Make sure the command isn't recycled by the connection when it's disposed - this is important since internal command // resources are referenced by the exception above, which is very likely to escape the using statement of the command. 
cmd.Dispose(); var cmd2 = conn.CreateCommand(); - Assert.AreNotSame(cmd2, cmd); + Assert.That(cmd, Is.Not.SameAs(cmd2)); } #region SchemaOnly @@ -683,8 +690,8 @@ public async Task SchemaOnly_next_result_beyond_end() using var cmd = new NpgsqlCommand($"SELECT * FROM {table}", conn); using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); - Assert.False(reader.NextResult()); - Assert.False(reader.NextResult()); + Assert.That(reader.NextResult(), Is.False); + Assert.That(reader.NextResult(), Is.False); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4124")] @@ -740,7 +747,13 @@ public async Task Field_index_does_not_exist() using var command = new NpgsqlCommand("SELECT 1", conn); using var dr = await command.ExecuteReaderAsync(Behavior); dr.Read(); - Assert.That(() => dr[5], Throws.Exception.TypeOf()); + + Assert.That(() => dr[1], AssertExpectedException()); + Assert.That(() => dr.GetValue(2), AssertExpectedException()); + Assert.That(() => dr.GetFieldValue(3), AssertExpectedException()); + + static IResolveConstraint AssertExpectedException() + => Throws.Exception.TypeOf().With.Message.StartsWith("Ordinal is out of range"); } [Test, Description("Performs some operations while a reader is still open and checks for exceptions")] @@ -748,8 +761,7 @@ public async Task Reader_is_still_open() { await using var conn = await OpenConnectionAsync(); // We might get the connection, on which the second command was already prepared, so prepare wouldn't start the UserAction - if (!IsMultiplexing) - conn.UnprepareAll(); + conn.UnprepareAll(); using var cmd1 = new NpgsqlCommand("SELECT 1", conn); await using var reader1 = await cmd1.ExecuteReaderAsync(Behavior); Assert.That(() => conn.ExecuteNonQuery("SELECT 1"), Throws.Exception.TypeOf()); @@ -757,16 +769,12 @@ public async Task Reader_is_still_open() using var cmd2 = new NpgsqlCommand("SELECT 2", conn); Assert.That(() => cmd2.ExecuteReader(Behavior), Throws.Exception.TypeOf()); - if 
(!IsMultiplexing) - Assert.That(() => cmd2.Prepare(), Throws.Exception.TypeOf()); + Assert.That(() => cmd2.Prepare(), Throws.Exception.TypeOf()); } [Test] public async Task Cleans_up_ok_with_dispose_calls([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var command = new NpgsqlCommand("SELECT 1", conn); using var dr = await command.ExecuteReaderAsync(Behavior); @@ -798,6 +806,7 @@ public async Task Null() Assert.That(reader.GetFieldValue(i), Is.EqualTo(DBNull.Value)); Assert.That(reader.GetProviderSpecificValue(i), Is.EqualTo(DBNull.Value)); Assert.That(() => reader.GetString(i), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetStream(i), Throws.Exception.TypeOf()); } } @@ -808,9 +817,6 @@ public async Task Null() [IssueLink("https://github.com/npgsql/npgsql/issues/1898")] public async Task HasRows([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -851,7 +857,7 @@ public async Task HasRows([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepare command.CommandText = $"INSERT INTO {table} (name) VALUES ('foo'); SELECT * FROM {table}"; if (prepare == PrepareOrNot.Prepared) command.Prepare(); - using (var reader = await command.ExecuteReaderAsync()) + using (var reader = await command.ExecuteReaderAsync(Behavior)) { Assert.That(reader.HasRows, Is.True); reader.Read(); @@ -868,7 +874,7 @@ public async Task HasRows_without_resultset() var table = await CreateTempTable(conn, "name TEXT"); using var command = new NpgsqlCommand($"DELETE FROM {table} WHERE name = 'unknown'", conn); using var reader = await command.ExecuteReaderAsync(Behavior); - Assert.IsFalse(reader.HasRows); + Assert.That(reader.HasRows, 
Is.False); } [Test] @@ -877,12 +883,101 @@ public async Task Interval_as_TimeSpan() using var conn = await OpenConnectionAsync(); using var command = new NpgsqlCommand("SELECT CAST('1 hour' AS interval) AS dauer", conn); using var dr = await command.ExecuteReaderAsync(Behavior); - Assert.IsTrue(dr.HasRows); - Assert.IsTrue(dr.Read()); - Assert.IsTrue(dr.HasRows); + Assert.That(dr.HasRows); + Assert.That(dr.Read()); + Assert.That(dr.HasRows); var ts = dr.GetTimeSpan(0); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5439")] + public async Task SequentialBufferedSeek() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + cmd.CommandText = """select v.i, jsonb_build_object(), current_timestamp + make_interval(0, 0, 0, 0, 0, 0, v.i), null::jsonb, '{"value": 42}'::jsonb from generate_series(1, 1000) as v(i)"""; + var rdr = await cmd.ExecuteReaderAsync(Behavior); + while (await rdr.ReadAsync()) { + var v1 = rdr[0]; + var v2 = rdr[1]; + //_ = rdr[2]; // uncomment line for successful execution + var v3 = rdr[3]; + var v4 = rdr[4]; + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5430")] + public async Task SequentialBufferedSeekLong() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + cmd.CommandText = """select v.i, repeat('1', 10), repeat('2', 10), repeat('3', 10), repeat('4', 10), 1, 2 from generate_series(1, 1000) as v(i)"""; + var rdr = await cmd.ExecuteReaderAsync(Behavior); + while (await rdr.ReadAsync()) + { + _ = rdr[0]; + _ = rdr[1]; + //_ = rdr[2]; + //_ = rdr[3]; + //_ = rdr[4]; + //_ = rdr[5]; // uncomment lines for successful execution + _ = rdr[6]; + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5430")] + public async Task SequentialBufferedSeekReread() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + cmd.CommandText = """select v.i, repeat('1', 10), 
repeat('2', 10), repeat('3', 10), repeat('4', 10), 1, NULL from generate_series(1, 1000) as v(i)"""; + var rdr = await cmd.ExecuteReaderAsync(Behavior); + while (await rdr.ReadAsync()) + { + _ = rdr[0]; + _ = rdr[1]; + //_ = rdr[2]; + //_ = rdr[3]; + //_ = rdr[4]; + //_ = rdr[5]; // uncomment lines for successful execution + _ = rdr.IsDBNull(6); + _ = rdr[6]; + Assert.That(rdr.IsDBNull(6)); + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5484")] + public async Task GetFieldValueAsync_AsyncRead() + { + if (!IsSequential) + return; + + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + + var expected = new byte[10000]; + expected.AsSpan().Fill(1); + + var pgMock = await postmasterMock.WaitForServerConnection(); + await pgMock + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(ByteaOid)) + .WriteDataRowWithFlush(expected); + + using var cmd = new NpgsqlCommand("irrelevant", conn); + var reader = await cmd.ExecuteReaderAsync(Behavior); + while (await reader.ReadAsync()) + { + var task = reader.GetFieldValueAsync(0); + await pgMock + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + Assert.That(await task, Is.EqualTo(expected)); + } + } + [Test] public async Task Close_connection_in_middle_of_row() { @@ -927,14 +1022,14 @@ public async Task Invalid_cast() using var conn = await OpenConnectionAsync(); // Chunking type handler using (var cmd = new NpgsqlCommand("SELECT 'foo'", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { reader.Read(); Assert.That(() => reader.GetInt32(0), Throws.Exception.TypeOf()); } // Simple type handler using (var cmd = new NpgsqlCommand("SELECT 1", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + using (var 
reader = await cmd.ExecuteReaderAsync(Behavior)) { reader.Read(); Assert.That(() => reader.GetDateTime(0), Throws.Exception.TypeOf()); @@ -947,7 +1042,7 @@ public async Task Many_reads() { using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand($"SELECT generate_series(1, {conn.Settings.ReadBufferSize})", conn); - using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync(Behavior); for (var i = 1; i <= conn.Settings.ReadBufferSize; i++) { Assert.That(reader.Read(), Is.True); @@ -959,6 +1054,10 @@ public async Task Many_reads() [Test] public async Task Nullable_scalar() { + // We read the same column multiple times + if (IsSequential) + return; + using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var p1 = new NpgsqlParameter { ParameterName = "p1", Value = DBNull.Value, NpgsqlDbType = NpgsqlDbType.Smallint }; @@ -967,7 +1066,7 @@ public async Task Nullable_scalar() Assert.That(p2.DbType, Is.EqualTo(DbType.Int16)); cmd.Parameters.Add(p1); cmd.Parameters.Add(p2); - using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync(Behavior); reader.Read(); for (var i = 0; i < cmd.Parameters.Count; i++) @@ -992,10 +1091,6 @@ public async Task Nullable_scalar() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2913")] public async Task Bug2913_reading_previous_query_messages() { - // No point in testing for multiplexing, as every query may use another connection - if (IsMultiplexing) - return; - var firstMrs = new ManualResetEventSlim(false); var secondMrs = new ManualResetEventSlim(false); @@ -1107,14 +1202,12 @@ public async Task Reader_reuse_on_dispose() [Test] public async Task Unbound_reader_reuse() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MinPoolSize = 1, - MaxPoolSize = 1, - }; - using var _ = 
CreateTempPool(csb.ToString(), out var connectionString); - - await using var conn1 = await OpenConnectionAsync(connectionString); + csb.MinPoolSize = 1; + csb.MaxPoolSize = 1; + }); + await using var conn1 = await dataSource.OpenConnectionAsync(); using var cmd1 = conn1.CreateCommand(); cmd1.CommandText = "SELECT 1"; var reader1 = await cmd1.ExecuteReaderAsync(Behavior); @@ -1127,7 +1220,7 @@ public async Task Unbound_reader_reuse() await conn1.CloseAsync(); } - await using var conn2 = await OpenConnectionAsync(connectionString); + await using var conn2 = await dataSource.OpenConnectionAsync(); using var cmd2 = conn2.CreateCommand(); cmd2.CommandText = "SELECT 2"; var reader2 = await cmd2.ExecuteReaderAsync(Behavior); @@ -1141,7 +1234,7 @@ public async Task Unbound_reader_reuse() await conn2.CloseAsync(); } - await using var conn3 = await OpenConnectionAsync(connectionString); + await using var conn3 = await dataSource.OpenConnectionAsync(); using var cmd3 = conn3.CreateCommand(); cmd3.CommandText = "SELECT 3"; var reader3 = await cmd3.ExecuteReaderAsync(Behavior); @@ -1163,14 +1256,14 @@ public async Task Bug3772() return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4), new FieldDescription(PostgresTypeOIDs.Bytea)); + .WriteRowDescription(new FieldDescription(Int4Oid), new FieldDescription(ByteaOid)); var intValue = new byte[] { 0, 0, 0, 1 }; var byteValue = new byte[] { 1, 2, 3, 4 }; @@ -1190,8 +1283,8 @@ public async Task Bug3772() reader.GetInt32(0); - 
Assert.Zero(reader.Connector.ReadBuffer.ReadBytesLeft); - Assert.NotZero(reader.Connector.ReadBuffer.ReadPosition); + Assert.That(reader.Connector.ReadBuffer.ReadBytesLeft, Is.Zero); + Assert.That(reader.Connector.ReadBuffer.ReadPosition, Is.Not.Zero); writeBuffer.WriteInt32(byteValue.Length); writeBuffer.WriteBytes(byteValue); @@ -1210,20 +1303,20 @@ await pgMock public async Task Dispose_does_not_swallow_exceptions([Values(true, false)] bool async) { await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); // Write responses for the query, but break the connection before sending CommandComplete/ReadyForQuery await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT 1", conn); - using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync(Behavior); await reader.ReadAsync(); pgMock.Close(); @@ -1234,6 +1327,24 @@ await pgMock Assert.ThrowsAsync(async () => await reader.DisposeAsync()); } + [Test] + public async Task Read_string_as_char() + { + await using var conn = await OpenConnectionAsync(); + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 'abcdefgh', 'ijklmnop'"; + + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + Assert.That(await reader.ReadAsync()); + Assert.That(reader.GetChar(0), Is.EqualTo('a')); + if (Behavior == 
CommandBehavior.SequentialAccess) + Assert.Throws(() => reader.GetChar(0)); + else + Assert.That(reader.GetChar(0), Is.EqualTo('a')); + Assert.That(reader.GetChar(1), Is.EqualTo('i')); + } + #region GetBytes / GetStream [Test] @@ -1243,7 +1354,7 @@ public async Task GetBytes() var table = await CreateTempTable(conn, "bytes BYTEA"); // TODO: This is too small to actually test any interesting sequential behavior - byte[] expected = { 1, 2, 3, 4, 5 }; + byte[] expected = [1, 2, 3, 4, 5]; var actual = new byte[expected.Length]; await conn.ExecuteNonQueryAsync($"INSERT INTO {table} (bytes) VALUES ({EncodeByteaHex(expected)})"); @@ -1268,18 +1379,13 @@ public async Task GetBytes() Assert.That(actual, Is.EqualTo(expected)); Assert.That(reader.GetBytes(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); - Assert.That(() => reader.GetBytes(1, 0, null, 0, 0), Throws.Exception.TypeOf(), - "GetBytes on non-bytea"); - Assert.That(() => reader.GetBytes(1, 0, actual, 0, 1), - Throws.Exception.TypeOf(), - "GetBytes on non-bytea"); Assert.That(reader.GetString(1), Is.EqualTo("foo")); reader.GetBytes(2, 0, actual, 0, 2); // Jump to another column from the middle of the column reader.GetBytes(4, 0, actual, 0, 2); Assert.That(reader.GetBytes(4, expected.Length - 1, actual, 0, 2), Is.EqualTo(1), "Length greater than data length"); - Assert.That(actual[0], Is.EqualTo(expected[expected.Length - 1]), "Length greater than data length"); + Assert.That(actual[0], Is.EqualTo(expected[^1]), "Length greater than data length"); Assert.That(() => reader.GetBytes(4, 0, actual, 0, actual.Length + 1), Throws.Exception.TypeOf(), "Length great than output buffer length"); // Close in the middle of a column @@ -1290,7 +1396,7 @@ public async Task GetBytes() } [Test] - public async Task GetStream_second_time_throws([Values(true, false)] bool isAsync) + public async Task GetStream_second_time([Values(true, false)] bool isAsync) { var expected = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; var 
streamGetter = BuildStreamGetter(isAsync); @@ -1303,8 +1409,39 @@ public async Task GetStream_second_time_throws([Values(true, false)] bool isAsyn using var stream = await streamGetter(reader, 0); - Assert.That(async () => await streamGetter(reader, 0), - Throws.Exception.TypeOf()); + if (IsSequential) + { + Assert.That(async () => await streamGetter(reader, 0), + Throws.Exception.TypeOf()); + } + else + { + // Non-sequential: getting a second stream disposes the first and returns a fresh one. + using var stream2 = await streamGetter(reader, 0); + Assert.That(() => stream.Read(new byte[1]), Throws.TypeOf()); + var buf = new byte[8]; + Assert.That(stream2.Read(buf), Is.EqualTo(8)); + Assert.That(buf, Is.EqualTo(expected)); + } + } + + [Test] + public async Task GetBytes_before_getstream([Values(true, false)] bool isAsync) + { + var expected = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; + var streamGetter = BuildStreamGetter(isAsync); + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand($"SELECT {EncodeByteaHex(expected)}::bytea", conn); + using var reader = await cmd.ExecuteReaderAsync(Behavior); + + await reader.ReadAsync(); + + // GetBytes with null buffer won't consume column in any way + Assert.That(reader.GetBytes(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); + + using var stream = await streamGetter(reader, 0); + Assert.That(stream.Length, Is.EqualTo(expected.Length)); } public static IEnumerable GetStreamCases() @@ -1453,6 +1590,50 @@ public async Task GetStream_in_middle_of_column_throws([Values] bool async) Assert.That(() => reader.GetStream(0), Throws.Exception.TypeOf()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5223")] + public async Task GetStream_seek() + { + // Sequential doesn't allow to seek + if (IsSequential) + return; + + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 'abcdefgh'"; + await using var 
reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var buffer = new byte[4]; + + await using var stream = reader.GetStream(0); + Assert.That(stream.CanSeek); + + var seekPosition = stream.Seek(-1, SeekOrigin.End); + Assert.That(seekPosition, Is.EqualTo(stream.Length - 1)); + var read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(1)); + Assert.That(Encoding.ASCII.GetString(buffer, 0, 1), Is.EqualTo("h")); + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(0)); + + seekPosition = stream.Seek(2, SeekOrigin.Begin); + Assert.That(seekPosition, Is.EqualTo(2)); + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(buffer.Length)); + Assert.That(Encoding.ASCII.GetString(buffer), Is.EqualTo("cdef")); + + seekPosition = stream.Seek(-3, SeekOrigin.Current); + Assert.That(seekPosition, Is.EqualTo(3)); + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(buffer.Length)); + Assert.That(Encoding.ASCII.GetString(buffer), Is.EqualTo("defg")); + + stream.Position = 1; + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(buffer.Length)); + Assert.That(Encoding.ASCII.GetString(buffer), Is.EqualTo("bcde")); + } + #endregion GetBytes / GetStream #region GetChars / GetTextReader @@ -1474,7 +1655,8 @@ public async Task GetChars() Assert.That(reader.GetChars(0, 0, actual, 0, 2), Is.EqualTo(2)); Assert.That(actual[0], Is.EqualTo(expected[0])); Assert.That(actual[1], Is.EqualTo(expected[1])); - Assert.That(reader.GetChars(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); + if (!IsSequential) + Assert.That(reader.GetChars(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); // Note: Unlike with bytea, finding out the length of the column consumes it (variable-width // UTF8 encoding) Assert.That(reader.GetChars(2, 0, actual, 0, 2), Is.EqualTo(2)); @@ -1496,12 +1678,36 @@ public async Task GetChars() // Jump to another column from the middle of the column reader.GetChars(5, 0, actual, 0, 2); 
Assert.That(reader.GetChars(5, expected.Length - 1, actual, 0, 2), Is.EqualTo(1), "Length greater than data length"); - Assert.That(actual[0], Is.EqualTo(expected[expected.Length - 1]), "Length greater than data length"); + Assert.That(actual[0], Is.EqualTo(expected[^1]), "Length greater than data length"); Assert.That(() => reader.GetChars(5, 0, actual, 0, actual.Length + 1), Throws.Exception.TypeOf(), "Length great than output buffer length"); // Close in the middle of a column reader.GetChars(6, 0, actual, 0, 2); } + [Test] + public async Task GetChars_AdvanceConsumed() + { + const string value = "01234567"; + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand($"SELECT '{value}'", conn); + using var reader = await cmd.ExecuteReaderAsync(Behavior); + reader.Read(); + + var buffer = new char[2]; + // Don't start at the beginning of the column. + reader.GetChars(0, 2, buffer, 0, 2); + reader.GetChars(0, 4, buffer, 0, 2); + reader.GetChars(0, 6, buffer, 0, 2); + + // Ask for data past the start and the previous point, exercising restart logic. 
+ if (!IsSequential) + { + reader.GetChars(0, 4, buffer, 0, 2); + reader.GetChars(0, 6, buffer, 0, 2); + } + } + [Test] public async Task GetTextReader([Values(true, false)] bool isAsync) { @@ -1527,9 +1733,16 @@ public async Task GetTextReader([Values(true, false)] bool isAsync) textReader.Read(actual, 0, 2); Assert.That(actual[0], Is.EqualTo(expected[0])); Assert.That(actual[1], Is.EqualTo(expected[1])); - Assert.That(async () => await textReaderGetter(reader, 0), - Throws.Exception.TypeOf(), - "Sequential text reader twice on same column"); + if (IsSequential) + { + Assert.That(async () => await textReaderGetter(reader, 0), + Throws.Exception.TypeOf(), + "Sequential text reader twice on same column"); + } + else + { + Assert.That(reader.GetChars(0, 0, actual, 4, 1), Is.EqualTo(1)); + } textReader.Read(actual, 2, 1); Assert.That(actual[2], Is.EqualTo(expected[2])); textReader.Dispose(); @@ -1552,8 +1765,8 @@ public async Task TextReader_zero_length_column() await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT ''"; - await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + Assert.That(await reader.ReadAsync()); using var textReader = reader.GetTextReader(0); Assert.That(textReader.Peek(), Is.EqualTo(-1)); @@ -1602,9 +1815,6 @@ public async Task GetChars_when_null() [Test] public async Task Reader_is_reused() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: Fails"); - using var conn = await OpenConnectionAsync(); NpgsqlDataReader reader1; @@ -1662,14 +1872,259 @@ public async Task GetTextReader_in_middle_of_column_throws([Values] bool async) Assert.That(() => reader.GetTextReader(0), Throws.Exception.TypeOf()); } + [Test] + public async Task GetStream_is_isolated_from_GetChars() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new 
NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // GetChars on the column, then get a stream — stream should start from the beginning. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("ABCDE")); + + await using var stream = reader.GetStream(0); + Assert.That(stream.CanSeek, Is.True); + Assert.That(stream.Length, Is.EqualTo(Encoding.UTF8.GetByteCount(str))); + Assert.That(stream.Position, Is.EqualTo(0)); + } + + [Test] + public async Task GetStream_survives_reread_of_same_column() + { + if (IsSequential) + return; + + var expected = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT {EncodeByteaHex(expected)}::bytea", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get stream, read partially. + var stream1 = reader.GetStream(0); + var buf = new byte[3]; + Assert.That(stream1.Read(buf), Is.EqualTo(3)); + Assert.That(buf, Is.EqualTo(new byte[] { 1, 2, 3 })); + + // Getting a second stream on same column disposes the first. + var stream2 = reader.GetStream(0); + Assert.That(() => stream1.Read(new byte[1]), Throws.TypeOf()); + + // Second stream should provide the full data. + var buf2 = new byte[8]; + Assert.That(stream2.Read(buf2), Is.EqualTo(8)); + Assert.That(buf2, Is.EqualTo(expected)); + } + + [Test] + public async Task GetTextReader_and_GetChars_interleaved() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJKLMNOP"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get a TextReader, read some chars. 
+ var textReader = reader.GetTextReader(0); + var buf = new char[4]; + Assert.That(textReader.Read(buf, 0, 4), Is.EqualTo(4)); + Assert.That(new string(buf), Is.EqualTo("ABCD")); + + // Now use GetChars to read from the start — should not affect the TextReader. + var charsBuf = new char[6]; + Assert.That(reader.GetChars(0, 0, charsBuf, 0, 6), Is.EqualTo(6)); + Assert.That(new string(charsBuf), Is.EqualTo("ABCDEF")); + + // TextReader should still be at its original position. + Assert.That(textReader.Read(buf, 0, 4), Is.EqualTo(4)); + Assert.That(new string(buf), Is.EqualTo("EFGH")); + } + + [Test] + public async Task GetStream_and_GetChars_on_same_column() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get a stream and read partially. + await using var stream = reader.GetStream(0); + var buf = new byte[4]; + Assert.That(stream.Read(buf), Is.EqualTo(4)); + + // GetChars on the same column — stream should remain valid. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("ABCDE")); + + // Stream should still be readable from where we left off. + Assert.That(stream.Read(buf), Is.EqualTo(4)); + + // GetChars at a different offset should also work. 
+ Assert.That(reader.GetChars(0, 5, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("FGHIJ")); + } + + [Test] + public async Task GetStream_seek_with_SubReadStream() + { + if (IsSequential) + return; + + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 'abcdefgh'::bytea", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + await using var stream = reader.GetStream(0); + Assert.That(stream.CanSeek, Is.True); + + // Read, seek back, read again — isolated from ReadPosition. + var buf = new byte[4]; + Assert.That(stream.Read(buf), Is.EqualTo(4)); + + stream.Position = 0; + var buf2 = new byte[4]; + Assert.That(stream.Read(buf2), Is.EqualTo(4)); + Assert.That(buf2, Is.EqualTo(buf)); + + // Seek to end, confirm empty. + stream.Seek(0, SeekOrigin.End); + Assert.That(stream.Read(buf), Is.EqualTo(0)); + } + + [Test] + public async Task GetChars_after_unconsumed_GetStream() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get a stream but don't read from it. + var stream = reader.GetStream(0); + + // GetChars should work — SubReadStream is isolated. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("ABCDE")); + + // Stream is still valid. 
+ var buf = new byte[3]; + Assert.That(stream.Read(buf), Is.EqualTo(3)); + } + + [Test] + public async Task Multiple_GetChars_calls_after_GetTextReader() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + var textReader = reader.GetTextReader(0); + var buf = new char[2]; + textReader.Read(buf, 0, 2); + Assert.That(new string(buf), Is.EqualTo("AB")); + + // Multiple GetChars calls at different offsets should all work. + var charBuf = new char[3]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 3), Is.EqualTo(3)); + Assert.That(new string(charBuf), Is.EqualTo("ABC")); + + Assert.That(reader.GetChars(0, 5, charBuf, 0, 3), Is.EqualTo(3)); + Assert.That(new string(charBuf), Is.EqualTo("FGH")); + + // TextReader should still work from where it was. + textReader.Read(buf, 0, 2); + Assert.That(new string(buf), Is.EqualTo("CD")); + } + + [Test] + public async Task Sequential_GetChars_advances_field_position([Values] bool fitsInBuffer) + { + // Invariant: GetChars must always advance ReadPosition (even when data is fully buffered), + // so that FieldAtStart correctly reflects consumption. This ensures the sequential seek + // guard blocks non-resumable re-reads after GetChars has consumed data. + // This is important for behavioral consistency across columns. As an optimization + // that skips ReadPosition advancement (reading from a view over the buffer) would pass the sequential seek guard. + if (!IsSequential) + return; + + await using var conn = await OpenConnectionAsync(); + var bufferSize = conn.Settings.ReadBufferSize; + var str = new string('x', fitsInBuffer ? 
10 : bufferSize * 2); + + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // GetChars consumes part of the column. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + + // A non-resumable read on the same column should throw — field is no longer at start. + Assert.That(() => reader.GetString(0), Throws.Exception.TypeOf()); + } + #endregion GetChars / GetTextReader + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5450")] + public async Task EndRead_StreamActive([Values]bool async) + { + const int columnLength = 1; + + await using var conn = await OpenConnectionAsync(); + var buffer = conn.Connector!.ReadBuffer; + buffer.AddBytesToRead(columnLength); + var reader = buffer.PgReader; + reader.Init(DataFormat.Binary, columnLength, resumable: false); + if (async) + await reader.StartReadAsync(new(DataFormat.Binary, Size.Unknown), CancellationToken.None); + else + reader.StartRead(new(DataFormat.Binary, Size.Unknown)); + + await using (var _ = reader.GetStream()) + { + if (async) + Assert.DoesNotThrowAsync(async () => await reader.EndReadAsync()); + else + Assert.DoesNotThrow(() => reader.EndRead()); + } + + reader.Commit(); + } + [Test, Description("Tests that everything goes well when a type handler generates a NpgsqlSafeReadException")] public async Task SafeReadException() { var dataSourceBuilder = CreateDataSourceBuilder(); // Temporarily reroute integer to go to a type handler which generates SafeReadExceptions - dataSourceBuilder.AddTypeResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: true)); + dataSourceBuilder.AddTypeInfoResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: true)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -1686,14 +2141,14 @@ public async Task Non_SafeReadException() { 
var dataSourceBuilder = CreateDataSourceBuilder(); // Temporarily reroute integer to go to a type handler which generates some exception - dataSourceBuilder.AddTypeResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: false)); + dataSourceBuilder.AddTypeInfoResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: false)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT 1, 'hello'", connection); await using var reader = await cmd.ExecuteReaderAsync(Behavior); await reader.ReadAsync(); - Assert.That(() => reader.GetInt32(0), Throws.Exception.With.Message.EqualTo("Non-safe read exception as requested")); + Assert.That(() => reader.GetInt32(0), Throws.Exception.With.Message.EqualTo("Broken")); Assert.That(connection.FullState, Is.EqualTo(ConnectionState.Broken)); Assert.That(connection.State, Is.EqualTo(ConnectionState.Closed)); } @@ -1703,27 +2158,24 @@ public async Task Non_SafeReadException() [Test, Description("Cancels ReadAsync via the NpgsqlCommand.Cancel, with successful PG cancellation")] public async Task ReadAsync_cancel_command_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) 
.WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT some_int FROM some_table", conn); - await using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first row - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and cancellation @@ -1752,27 +2204,24 @@ await pgMock [Test, Description("Cancels ReadAsync via the cancellation token, with successful PG cancellation")] public async Task ReadAsync_cancel_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT some_int FROM some_table", conn); - await using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first row - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second 
row - simulate blocking and cancellation @@ -1803,28 +2252,25 @@ await pgMock [Test, Description("Cancels NextResultAsync via the cancellation token, with successful PG cancellation")] public async Task NextResult_cancel_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, only for the first resultset (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .WriteCommandComplete() .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); - await using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first resultset - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to advance to the second resultset - simulate blocking and cancellation @@ -1855,19 +2301,16 @@ await pgMock [Test, Description("Cancels ReadAsync via the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task ReadAsync_cancel_hard([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = 
CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); @@ -1875,7 +2318,7 @@ await pgMock await using var reader = await cmd.ExecuteReaderAsync(Behavior); // Successfully read the first row - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and cancellation @@ -1899,19 +2342,16 @@ await pgMock [Test, Description("Cancels NextResultAsync via the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task NextResultAsync_cancel_hard([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - 
.WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .WriteCommandComplete() .FlushAsync(); @@ -1920,7 +2360,7 @@ await pgMock await using var reader = await cmd.ExecuteReaderAsync(Behavior); // Successfully read the first resultset - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and cancellation @@ -1944,22 +2384,19 @@ await pgMock [Test, Description("Cancels sequential ReadAsGetFieldValueAsync")] public async Task GetFieldValueAsync_sequential_cancel([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if (!IsSequential) return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) + .WriteRowDescription(new FieldDescription(ByteaOid)) .WriteDataRowWithFlush(new byte[10000]); using var cmd = new NpgsqlCommand("SELECT some_bytea FROM some_table", conn); @@ -1982,22 +2419,19 @@ await pgMock [Test, Description("Cancels sequential ReadAsGetFieldValueAsync")] public async Task IsDBNullAsync_sequential_cancel([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if 
(!IsSequential) return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea), new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(ByteaOid), new FieldDescription(Int4Oid)) .WriteDataRowWithFlush(new byte[10000], new byte[4]); using var cmd = new NpgsqlCommand("SELECT some_bytea, some_int FROM some_table", conn); @@ -2017,24 +2451,6 @@ await pgMock Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); } - [Test, Description("Cancellation does not work with the multiplexing")] - public async Task Cancel_multiplexing_disabled() - { - if (!IsMultiplexing) - return; - - using var _ = CreateTempPool(ConnectionString, out var connString); - await using var conn = await OpenConnectionAsync(connString); - await using var cmd = new NpgsqlCommand("SELECT generate_series(1, 100); SELECT generate_series(1, 100)", conn); - await using var reader = await cmd.ExecuteReaderAsync(Behavior); - var cancelledToken = new CancellationToken(canceled: true); - Assert.IsTrue(await reader.ReadAsync()); - while (await reader.ReadAsync(cancelledToken)) { } - Assert.IsTrue(await reader.NextResultAsync(cancelledToken)); - while (await reader.ReadAsync(cancelledToken)) { } - Assert.IsFalse(conn.Connector!.UserCancellationRequested); - } - #endregion Cancellation #region Timeout @@ -2042,26 +2458,25 @@ public async Task 
Cancel_multiplexing_disabled() [Test, Description("Timeouts sequential ReadAsGetFieldValueAsync")] public async Task GetFieldValueAsync_sequential_timeout() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if (!IsSequential) return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - csb.CommandTimeout = 3; - csb.CancellationTimeout = 15000; + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + CommandTimeout = 3, + CancellationTimeout = 15000 + }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString()); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) + .WriteRowDescription(new FieldDescription(ByteaOid)) .WriteDataRowWithFlush(new byte[10000]); using var cmd = new NpgsqlCommand("SELECT some_bytea FROM some_table", conn); @@ -2080,26 +2495,25 @@ await pgMock [Test, Description("Timeouts sequential IsDBNullAsync")] public async Task IsDBNullAsync_sequential_timeout() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if (!IsSequential) return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - csb.CommandTimeout = 3; - csb.CancellationTimeout = 15000; + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + CommandTimeout = 3, + CancellationTimeout = 15000 + }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString()); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var 
connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea), new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(ByteaOid), new FieldDescription(Int4Oid)) .WriteDataRowWithFlush(new byte[10000], new byte[4]); using var cmd = new NpgsqlCommand("SELECT some_bytea, some_int FROM some_table", conn); @@ -2118,18 +2532,15 @@ await pgMock [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3446")] public async Task Bug3446() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(new byte[4]) .FlushAsync(); @@ -2148,6 +2559,41 @@ await pgMock Assert.That(conn.Connector!.State, Is.EqualTo(ConnectorState.Ready)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6160")] + [Description("Consuming result set shouldn't go infinite in case connection is broken")] + public async Task Bug6160() + { + var csb = new 
NpgsqlConnectionStringBuilder(ConnectionString) + { + // Set to -1 to trigger immediate connection break on timeout + CancellationTimeout = -1, + CommandTimeout = 1 + }; + await using var postmasterMock = PgPostmasterMock.Start(csb.ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + + var pgMock = await postmasterMock.WaitForServerConnection(); + await pgMock + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteDataRow(new byte[4]) + .FlushAsync(); + + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + await using (var reader = await cmd.ExecuteReaderAsync(Behavior | CommandBehavior.SingleRow)) + { + await reader.ReadAsync(); + // The second read will try to consume the whole resultset due to CommandBehavior.SingleRow + // Which will fail with timeout (and immediate connection break) since we didn't send anything else beside the first row + var ex = Assert.ThrowsAsync(async () => await reader.ReadAsync())!; + Assert.That(ex.InnerException, Is.TypeOf()); + + Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + } + } + #endregion #region Initialization / setup / teardown @@ -2157,7 +2603,7 @@ await pgMock readonly CommandBehavior Behavior; // ReSharper restore InconsistentNaming - public ReaderTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) : base(multiplexingMode) + public ReaderTests(CommandBehavior behavior) { Behavior = behavior; IsSequential = (Behavior & CommandBehavior.SequentialAccess) != 0; @@ -2168,57 +2614,49 @@ public ReaderTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) #region Mock Type Handlers -class ExplodingTypeHandlerResolverFactory : TypeHandlerResolverFactory +sealed class ExplodingTypeHandlerResolverFactory(bool safe) : PgTypeInfoResolverFactory { - readonly bool _safe; - public ExplodingTypeHandlerResolverFactory(bool 
safe) => _safe = safe; - public override TypeHandlerResolver Create(NpgsqlConnector connector) => new ExplodingTypeHandlerResolver(_safe); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(safe); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; - public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); - public override string? GetDataTypeNameByClrType(Type clrType) => throw new NotSupportedException(); - public override string? GetDataTypeNameByValueDependentValue(object value) => throw new NotSupportedException(); - - class ExplodingTypeHandlerResolver : TypeHandlerResolver + sealed class Resolver(bool safe) : IPgTypeInfoResolver { - readonly bool _safe; - - public ExplodingTypeHandlerResolver(bool safe) => _safe = safe; + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new PgConcreteTypeInfo(options, new ExplodingTypeHandler(safe), DataTypeNames.Int4); - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => - typeName == "integer" ? new ExplodingTypeHandler(null!, _safe) : null; - public override NpgsqlTypeHandler? ResolveByClrType(Type type) => null; - public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotImplementedException(); + return null; + } } } -class ExplodingTypeHandler : NpgsqlSimpleTypeHandler +class ExplodingTypeHandler : PgBufferedConverter { readonly bool _safe; - internal ExplodingTypeHandler(PostgresType postgresType, bool safe) : base(postgresType) => _safe = safe; + internal ExplodingTypeHandler(bool safe) => _safe = safe; - public override int Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - buf.ReadInt32(); + public override Size GetSize(SizeContext context, int value, ref object? 
writeState) + => throw new NotSupportedException(); - throw _safe - ? new Exception("Safe read exception as requested") - : buf.Connector.Break(new Exception("Non-safe read exception as requested")); + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Value; + return format is DataFormat.Binary; } - public override int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => throw new NotSupportedException(); - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - public override void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => throw new NotSupportedException(); - - public override Task WriteObjectWithLength( - object? value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) + protected override void WriteCore(PgWriter writer, int value) => throw new NotSupportedException(); + + protected override int ReadCore(PgReader reader) + { + if (_safe) + throw new Exception("Safe read exception as requested"); + + reader.BreakConnection(); + return default; + } } #endregion diff --git a/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs index a8a363a583..6e0f35a51a 100644 --- a/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using NUnit.Framework; @@ -16,7 +16,6 @@ namespace Npgsql.Tests.Replication; /// for the individual logical replication tests, they are in fact not, because /// the methods they test are extension points for plugin developers. 
/// -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] public class CommonLogicalReplicationTests : SafeReplicationTestBase { diff --git a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs index c00c7a8ecc..6038e5c854 100644 --- a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.Concurrent; +using System; using System.Collections.Generic; using System.IO; using System.Runtime.CompilerServices; @@ -15,7 +14,6 @@ namespace Npgsql.Tests.Replication; [TestFixture(typeof(LogicalReplicationConnection))] [TestFixture(typeof(PhysicalReplicationConnection))] -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] public class CommonReplicationTests : SafeReplicationTestBase where TConnection : ReplicationConnection, new() @@ -317,10 +315,8 @@ await c.ExecuteNonQueryAsync(@$" // will occupy the connection it is bound to. 
var insertTask = Task.Run(async () => { - await using var insertConn = await OpenConnectionAsync(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c synchronous_commit=on" - }); + await using var dataSource = CreateDataSource(csb => csb.Options = "-c synchronous_commit=on"); + await using var insertConn = await dataSource.OpenConnectionAsync(); await insertConn.ExecuteNonQueryAsync($"INSERT INTO {tableName} (name) VALUES ('{value1String}')"); }); @@ -353,10 +349,8 @@ await c.ExecuteNonQueryAsync(@$" var value2String = Guid.NewGuid().ToString("B"); insertTask = Task.Run(async () => { - await using var insertConn = OpenConnection(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c synchronous_commit=remote_apply" - }); + await using var dataSource = CreateDataSource(csb => csb.Options = "-c synchronous_commit=remote_apply"); + await using var insertConn = await dataSource.OpenConnectionAsync(); await insertConn.ExecuteNonQueryAsync($"INSERT INTO {tableName} (name) VALUES ('{value2String}')"); }); @@ -382,10 +376,8 @@ await c.ExecuteNonQueryAsync(@$" var value3String = Guid.NewGuid().ToString("B"); insertTask = Task.Run(async () => { - await using var insertConn = OpenConnection(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c synchronous_commit=remote_write" - }); + await using var dataSource = CreateDataSource(csb => csb.Options = "-c synchronous_commit=remote_write"); + await using var insertConn = await dataSource.OpenConnectionAsync(); await insertConn.ExecuteNonQueryAsync($"INSERT INTO {tableName} (name) VALUES ('{value3String}')"); }); @@ -439,7 +431,7 @@ async Task GetCommitLsn(string valueString) // NpgsqlLogicalReplicationConnection // Begin Transaction, Insert, Commit Transaction for (var i = 0; i < 3; i++) - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); return messages.Current.Lsn; } diff --git 
a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index d8fd2ed3a2..802d69be4c 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.IO; using System.Linq; @@ -10,55 +10,44 @@ using Npgsql.Replication; using Npgsql.Replication.PgOutput; using Npgsql.Replication.PgOutput.Messages; +using Npgsql.Util; using TruncateOptions = Npgsql.Replication.PgOutput.Messages.TruncateMessage.TruncateOptions; using ReplicaIdentitySetting = Npgsql.Replication.PgOutput.Messages.RelationMessage.ReplicaIdentitySetting; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Replication; -[TestFixture(ProtocolVersion.V1, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] -[TestFixture(ProtocolVersion.V1, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] -[TestFixture(ProtocolVersion.V2, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] -[TestFixture(ProtocolVersion.V3, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] -[TestFixture(ProtocolVersion.V3, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] -// We currently don't execute all possible combinations of settings for efficiency reasons because they don't -// interact in the current implementation. -// Feel free to uncomment some or all of the following lines if the implementation changed or you suspect a -// problem with some combination. 
-// [TestFixture(ProtocolVersion.V1, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.StreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V3, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V3, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] -// [TestFixture(ProtocolVersion.V3, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.StreamingTransactionMode)] -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] +[TestFixture(PgOutputProtocolVersion.V1, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V1, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V2, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V3, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V3, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V4, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V4, ReplicationDataMode.DefaultReplicationDataMode, 
TransactionMode.ParallelStreamingTransactionMode)] [NonParallelizable] // These tests aren't designed to be parallelizable -public class PgOutputReplicationTests : SafeReplicationTestBase +public class PgOutputReplicationTests( + PgOutputProtocolVersion protocolVersion, + PgOutputReplicationTests.ReplicationDataMode dataMode, + PgOutputReplicationTests.TransactionMode transactionMode) + : SafeReplicationTestBase { - readonly ulong _protocolVersion; - readonly bool? _binary; - readonly bool? _streaming; + readonly bool? _binary = dataMode == ReplicationDataMode.BinaryReplicationDataMode + ? true + : dataMode == ReplicationDataMode.TextReplicationDataMode + ? false + : null; + readonly PgOutputStreamingMode? _streamingMode = transactionMode switch + { + TransactionMode.DefaultTransactionMode => null, + TransactionMode.NonStreamingTransactionMode => PgOutputStreamingMode.Off, + TransactionMode.StreamingTransactionMode => PgOutputStreamingMode.On, + TransactionMode.ParallelStreamingTransactionMode => PgOutputStreamingMode.Parallel, + _ => throw new ArgumentOutOfRangeException(nameof(transactionMode), transactionMode, null) + }; bool IsBinary => _binary ?? false; - bool IsStreaming => _streaming ?? false; - ulong Version => _protocolVersion; - - public PgOutputReplicationTests(ProtocolVersion protocolVersion, ReplicationDataMode dataMode, TransactionMode transactionMode) - { - _protocolVersion = (ulong)protocolVersion; - _binary = dataMode == ReplicationDataMode.BinaryReplicationDataMode - ? true - : dataMode == ReplicationDataMode.TextReplicationDataMode - ? false - : null; - _streaming = transactionMode == TransactionMode.StreamingTransactionMode - ? true - : transactionMode == TransactionMode.NonStreamingTransactionMode - ? 
false - : null; - } + bool IsStreaming => _streamingMode.HasValue && _streamingMode.Value != PgOutputStreamingMode.Off; + PgOutputProtocolVersion Version => protocolVersion; [Test] public Task CreatePgOutputReplicationSlot() @@ -126,12 +115,27 @@ public Task Insert() Assert.That(insertMsg.Relation, Is.SameAs(relationMsg)); var columnEnumerator = insertMsg.NewRow.GetAsyncEnumerator(); Assert.That(await columnEnumerator.MoveNextAsync(), Is.True); + var postgresType = columnEnumerator.Current.GetPostgresType(); + Assert.That(postgresType.FullName, Is.EqualTo("pg_catalog.integer")); + Assert.That(columnEnumerator.Current.GetDataTypeName(), Is.EqualTo("integer")); + Assert.That(columnEnumerator.Current.GetFieldName(), Is.EqualTo("id")); if (IsBinary) + { + Assert.That(columnEnumerator.Current.GetFieldType(), Is.EqualTo(typeof(int))); Assert.That(await columnEnumerator.Current.Get(), Is.EqualTo(1)); + } else + { + Assert.That(columnEnumerator.Current.GetFieldType(), Is.EqualTo(typeof(string))); Assert.That(await columnEnumerator.Current.Get(), Is.EqualTo("1")); + } Assert.That(await columnEnumerator.MoveNextAsync(), Is.True); + postgresType = columnEnumerator.Current.GetPostgresType(); + Assert.That(postgresType.FullName, Is.EqualTo("pg_catalog.text")); + Assert.That(columnEnumerator.Current.GetDataTypeName(), Is.EqualTo("text")); + Assert.That(columnEnumerator.Current.GetFieldType(), Is.EqualTo(typeof(string))); + Assert.That(columnEnumerator.Current.GetFieldName(), Is.EqualTo("name")); Assert.That(columnEnumerator.Current.IsDBNull, Is.False); Assert.That(await columnEnumerator.Current.Get(), Is.EqualTo("val1")); Assert.That(await columnEnumerator.MoveNextAsync(), Is.False); @@ -641,10 +645,11 @@ await c.ExecuteNonQueryAsync(@$" await NextMessage(messages); }, nameof(Dispose_while_replicating)); - [TestCase(true)] - [TestCase(false)] + [TestCase(true, true)] + [TestCase(true, false)] + [TestCase(false, false)] [Test(Description = "Tests whether logical decoding messages 
get replicated as Logical Replication Protocol Messages on PostgreSQL 14 and above")] - public Task LogicalDecodingMessage(bool writeMessages) + public Task LogicalDecodingMessage(bool writeMessages, bool readMessages) => SafeReplicationTest( async (slotName, tableName, publicationName) => { @@ -689,9 +694,12 @@ public Task LogicalDecodingMessage(bool writeMessages) Assert.That(msg.Flags, Is.EqualTo(1)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + } } // Relation @@ -712,72 +720,109 @@ public Task LogicalDecodingMessage(bool writeMessages) Assert.That(msg.Flags, Is.EqualTo(0)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + } } - if (IsStreaming) + // PostgreSQL 18 skips logical decoding of already-aborted transactions + if (c.PostgreSqlVersion.IsGreaterOrEqual(18)) { - // Begin Transaction 2 - transactionXid = await AssertTransactionStart(messages); - - // Relation - await NextMessage(messages); - - // Inserts - for (var insertCount = 0; insertCount < 10; insertCount++) - await NextMessage(messages); - - // LogicalDecodingMessage 2 
(transactional) + // LogicalDecodingMessage 2 (non-transactional) if (writeMessages) { var msg = await NextMessage(messages); - Assert.That(msg.TransactionXid, IsStreaming ? Is.EqualTo(transactionXid) : Is.Null); - Assert.That(msg.Flags, Is.EqualTo(1)); + Assert.That(msg.TransactionXid, Is.Null); + Assert.That(msg.Flags, Is.EqualTo(0)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); - Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + } + } + } + else + { + if (IsStreaming) + { + // Begin Transaction 2 + transactionXid = await AssertTransactionStart(messages); + + // Relation + await NextMessage(messages); + + // Inserts + for (var insertCount = 0; insertCount < 10; insertCount++) + await NextMessage(messages); + + // LogicalDecodingMessage 2 (transactional) + if (writeMessages) + { + var msg = await NextMessage(messages); + Assert.That(msg.TransactionXid, IsStreaming ? 
Is.EqualTo(transactionXid) : Is.Null); + Assert.That(msg.Flags, Is.EqualTo(1)); + Assert.That(msg.Prefix, Is.EqualTo(prefix)); + Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + } + } + + // Further inserts + // We don't try to predict how many insert messages we get here + // since the streaming transaction will most likely abort before + // we reach the expected number + while (await messages.MoveNextAsync() && messages.Current is InsertMessage + || messages.Current is StreamStopMessage + && await messages.MoveNextAsync() + && messages.Current is StreamStartMessage + && await messages.MoveNextAsync() + && messages.Current is InsertMessage) + { + // Ignore + } } + else if (writeMessages) + await messages.MoveNextAsync(); - // Further inserts - // We don't try to predict how many insert messages we get here - // since the streaming transaction will most likely abort before - // we reach the expected number - while (await messages.MoveNextAsync() && messages.Current is InsertMessage - || messages.Current is StreamStopMessage - && await messages.MoveNextAsync() - && messages.Current is StreamStartMessage - && await messages.MoveNextAsync() - && messages.Current is InsertMessage) + // LogicalDecodingMessage 3 (non-transactional) + if (writeMessages) { - // Ignore + var msg = (LogicalDecodingMessage)messages.Current; + Assert.That(msg.TransactionXid, Is.Null); + Assert.That(msg.Flags, Is.EqualTo(0)); + Assert.That(msg.Prefix, Is.EqualTo(prefix)); + Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + } + + 
if (IsStreaming) + await messages.MoveNextAsync(); } - } - else if (writeMessages) - await messages.MoveNextAsync(); - // LogicalDecodingMessage 3 (non-transactional) - if (writeMessages) - { - var msg = (LogicalDecodingMessage)messages.Current; - Assert.That(msg.TransactionXid, Is.Null); - Assert.That(msg.Flags, Is.EqualTo(0)); - Assert.That(msg.Prefix, Is.EqualTo(prefix)); - Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + // Rollback Transaction 2 if (IsStreaming) - await messages.MoveNextAsync(); + { + Assert.That(messages.Current, + _streamingMode == PgOutputStreamingMode.On + ? Is.TypeOf() + : Is.TypeOf()); + } } - // Rollback Transaction 2 - if (IsStreaming) - Assert.That(messages.Current, Is.TypeOf()); - streamingCts.Cancel(); await AssertReplicationCancellation(messages); await rc.DropReplicationSlot(slotName, cancellationToken: CancellationToken.None); @@ -1076,7 +1121,7 @@ public Task TwoPhase([Values]bool commit) { // Streaming of prepared transaction is only supported for // logical streaming replication protocol >= 3 - if (_protocolVersion < 3UL) + if (protocolVersion < PgOutputProtocolVersion.V3) return Task.CompletedTask; return SafePgOutputReplicationTest( @@ -1156,7 +1201,7 @@ public Task TwoPhase([Values]bool commit) public Task Bug4633() { // We don't need all the various test cases here since the bug gets triggered in any case - if (IsStreaming || IsBinary || Version > 1) + if (IsStreaming || IsBinary || Version > PgOutputProtocolVersion.V1) return Task.CompletedTask; return SafePgOutputReplicationTest( @@ -1253,11 +1298,89 @@ await c.ExecuteNonQueryAsync(@$" }, 2); } + [Test(Description = $"Tests whether {nameof(FullUpdateMessage)} instances with unchanged toasted values behave as expected."), Explicit("Massive inserts")] + public 
Task Update_for_full_replica_identity_with_unchanged_toasted_value() + => SafeReplicationTest( + async (slotName, tableName, publicationName) => + { + await using var c = await OpenConnectionAsync(); + await c.ExecuteNonQueryAsync($$""" + CREATE TABLE {{tableName}} (id INT PRIMARY KEY, name JSONB NOT NULL, something_else INT NULL); + ALTER TABLE {{tableName}} REPLICA IDENTITY FULL; + INSERT INTO {{tableName}} SELECT i, ('{"row_' || i::text || '": [{{string.Join(", ", Enumerable.Range(1, 1024))}}]}')::jsonb, NULL FROM generate_series(1, 15000) s(i); + CREATE PUBLICATION {{publicationName}} FOR TABLE {{tableName}}; + """); + await using var rc = await OpenReplicationConnectionAsync(); + var slot = await rc.CreatePgOutputReplicationSlot(slotName); + + await using var tran = await c.BeginTransactionAsync(); + await c.ExecuteNonQueryAsync($""" + UPDATE {tableName} SET name='"val1_updated"' WHERE id = 1; + UPDATE {tableName} SET something_else = id WHERE id > 1 + """); + await tran.CommitAsync(); + + using var streamingCts = new CancellationTokenSource(); + var messages = SkipEmptyTransactions(rc.StartReplication(slot, GetOptions(publicationName), streamingCts.Token)) + .GetAsyncEnumerator(); + + // Begin Transaction + var transactionXid = await AssertTransactionStart(messages); + + // Relation + var relationMsg = await NextMessage(messages); + + // Update of the first row (updating the jsonb column) + var updateMsg = await NextMessage(messages); + Assert.That(updateMsg.TransactionXid, IsStreaming ? 
Is.EqualTo(transactionXid) : Is.Null); + Assert.That(updateMsg.Relation, Is.SameAs(relationMsg)); + + var newRowColumnEnumerator = updateMsg.NewRow.GetAsyncEnumerator(); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsUnchangedToastedValue, Is.False); + Assert.That(await newRowColumnEnumerator.Current.Get(), Is.EqualTo("\"val1_updated\"")); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsDBNull, Is.True); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.False); + + + // Update of the following rows (not updating the jsonb column) + updateMsg = await NextMessage(messages); + Assert.That(updateMsg.TransactionXid, IsStreaming ? Is.EqualTo(transactionXid) : Is.Null); + Assert.That(updateMsg.Relation, Is.SameAs(relationMsg)); + + newRowColumnEnumerator = updateMsg.NewRow.GetAsyncEnumerator(); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsUnchangedToastedValue, Is.True); + Assert.That(async () => await newRowColumnEnumerator.Current.Get(), + Throws.TypeOf() + .With.Message.EqualTo("Column 'name' is an unchanged TOASTed value (actual value not sent).")); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsDBNull, Is.False); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.False); + + // Remaining updates + for (var updateCount = 0; updateCount < 14998; updateCount++) + await NextMessage(messages); + + // Commit Transaction + await AssertTransactionCommit(messages); + + streamingCts.Cancel(); + Assert.That(async () => await messages.MoveNextAsync(), Throws.Exception.AssignableTo() + .With.InnerException.InstanceOf() + 
.And.InnerException.Property(nameof(PostgresException.SqlState)) + .EqualTo(PostgresErrorCodes.QueryCanceled)); + await rc.DropReplicationSlot(slotName, cancellationToken: CancellationToken.None); + }); + #region Non-Test stuff (helper methods, initialization, enums, ...) async Task AssertTransactionStart(IAsyncEnumerator messages) { - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); switch (messages.Current) { @@ -1278,13 +1401,13 @@ await c.ExecuteNonQueryAsync(@$" async Task AssertTransactionCommit(IAsyncEnumerator messages) { - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); switch (messages.Current) { case StreamStopMessage: Assert.That(IsStreaming); - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); Assert.That(messages.Current, Is.TypeOf()); return; case CommitMessage: @@ -1297,10 +1420,10 @@ async Task AssertTransactionCommit(IAsyncEnumerator async Task AssertPrepare(IAsyncEnumerator enumerator) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); if (IsStreaming && enumerator.Current is StreamStopMessage) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); Assert.That(enumerator.Current, Is.TypeOf()); return (PrepareMessageBase)enumerator.Current!; } @@ -1312,16 +1435,16 @@ async Task AssertPrepare(IAsyncEnumerator NextMessage(IAsyncEnumerator enumerator, bool expectRelationMessage = false) where TExpected : PgOutputReplicationMessage { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); if (IsStreaming && enumerator.Current is StreamStopMessage) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); Assert.That(enumerator.Current, Is.TypeOf()); - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); if 
(expectRelationMessage) { Assert.That(enumerator.Current, Is.TypeOf()); - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); } } @@ -1361,7 +1484,7 @@ async IAsyncEnumerable SkipEmptyTransactions(IAsyncE } PgOutputReplicationOptions GetOptions(string publicationName, bool? messages = null) - => new(publicationName, _protocolVersion, _binary, _streaming, messages); + => new(publicationName, protocolVersion, _binary, _streamingMode, messages); Task SafePgOutputReplicationTest(Func testAction, [CallerMemberName] string memberName = "") => SafeReplicationTest(testAction, GetObjectName(memberName)); @@ -1372,11 +1495,11 @@ Task SafePgOutputReplicationTest(Func testAction string GetObjectName(string memberName) { var sb = new StringBuilder(memberName) - .Append("_v").Append(_protocolVersion); + .Append("_v").Append(protocolVersion); if (_binary.HasValue) sb.Append("_b_").Append(BoolToChar(_binary.Value)); - if (_streaming.HasValue) - sb.Append("_s_").Append(BoolToChar(_streaming.Value)); + if (_streamingMode.HasValue) + sb.Append("_s_").Append(_streamingMode.Value); return sb.ToString(); } @@ -1391,15 +1514,25 @@ public async Task SetUp() { await using var c = await OpenConnectionAsync(); TestUtil.MinimumPgVersion(c, "10.0", "The Logical Replication Protocol (via pgoutput plugin) was introduced in PostgreSQL 10"); - if (_protocolVersion > 2) + if (protocolVersion > PgOutputProtocolVersion.V3) + TestUtil.MinimumPgVersion(c, "16.0", "Logical Streaming Replication Protocol version 4 was introduced in PostgreSQL 16"); + if (protocolVersion > PgOutputProtocolVersion.V2) TestUtil.MinimumPgVersion(c, "15.0", "Logical Streaming Replication Protocol version 3 was introduced in PostgreSQL 15"); - if (_protocolVersion > 1) + if (protocolVersion > PgOutputProtocolVersion.V1) TestUtil.MinimumPgVersion(c, "14.0", "Logical Streaming Replication Protocol version 2 was introduced in PostgreSQL 14"); if (IsBinary) TestUtil.MinimumPgVersion(c, 
"14.0", "Sending replication values in binary representation was introduced in PostgreSQL 14"); if (IsStreaming) { - TestUtil.MinimumPgVersion(c, "14.0", "Streaming of in-progress transactions was introduced in PostgreSQL 14"); + switch (_streamingMode) + { + case PgOutputStreamingMode.On: + TestUtil.MinimumPgVersion(c, "14.0", "Streaming of in-progress transactions was introduced in PostgreSQL 14"); + break; + case PgOutputStreamingMode.Parallel: + TestUtil.MinimumPgVersion(c, "16.0", "Parallel streaming of in-progress transactions was introduced in PostgreSQL 16"); + break; + } var logicalDecodingWorkMem = (string)(await c.ExecuteScalarAsync("SHOW logical_decoding_work_mem"))!; if (logicalDecodingWorkMem != "64kB") { @@ -1410,12 +1543,6 @@ public async Task SetUp() } } - public enum ProtocolVersion : ulong - { - V1 = 1UL, - V2 = 2UL, - V3 = 3UL, - } public enum ReplicationDataMode { DefaultReplicationDataMode, @@ -1427,6 +1554,7 @@ public enum TransactionMode DefaultTransactionMode, NonStreamingTransactionMode, StreamingTransactionMode, + ParallelStreamingTransactionMode } #endregion Non-Test stuff (helper methods, initialization, ennums, ...) diff --git a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs index 948ffabc7a..4c4efab2e5 100644 --- a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading; using System.Threading.Tasks; using NUnit.Framework; @@ -53,7 +53,7 @@ FROM pg_replication_slots await using var reader = await cmd.ExecuteReaderAsync(); Assert.That(reader.Read, Is.EqualTo(createSlot)); var expectedSlotName = createSlot ? reader.GetFieldValue(reader.GetOrdinal("slot_name")) : null; - var expectedTli = createSlot ? unchecked((ulong?)reader.GetFieldValue(reader.GetOrdinal("timeline_id"))) : null; + var expectedTli = createSlot ? 
(uint?)reader.GetFieldValue(reader.GetOrdinal("timeline_id")) : null; var expectedRestartLsn = createSlot ? reader.GetFieldValue(reader.GetOrdinal("restart_lsn")) : null; Assert.That(reader.Read, Is.False); await using var rc = await OpenReplicationConnectionAsync(); @@ -90,7 +90,7 @@ public Task Replication_with_slot() // other transactions possibly from system processes can // interfere here, inserting additional messages, but more // likely we'll get everything in one big chunk. - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); var message = messages.Current; Assert.That(message.WalStart, Is.EqualTo(info.XLogPos)); Assert.That(message.WalEnd, Is.GreaterThan(message.WalStart)); @@ -128,7 +128,7 @@ public async Task Replication_without_slot() // other transactions possibly from system processes can // interfere here, inserting additional messages, but more // likely we'll get everything in one big chunk. - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); var message = messages.Current; Assert.That(message.WalStart, Is.EqualTo(info.XLogPos)); Assert.That(message.WalEnd, Is.GreaterThan(message.WalStart)); diff --git a/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs b/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs index 77f67eaf4b..3034dee2f1 100644 --- a/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs +++ b/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Runtime.CompilerServices; using System.Text.RegularExpressions; diff --git a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs index 732c1a3e67..694b6b420d 100644 --- a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs @@ -1,4 +1,3 @@ -using System; 
using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; @@ -13,7 +12,6 @@ namespace Npgsql.Tests.Replication; /// implementation of logical replication was still somewhat incomplete. /// Please don't change them without confirming that they still work on those old versions. /// -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] // These tests aren't designed to be parallelizable public class TestDecodingReplicationTests : SafeReplicationTestBase { @@ -328,7 +326,7 @@ await c.ExecuteNonQueryAsync(@$" static async ValueTask NextMessage(IAsyncEnumerator enumerator) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); return enumerator.Current!; } diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 83f9e859c6..cf8fcc9e8f 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using NUnit.Framework; using System; using System.Data; @@ -10,7 +10,7 @@ namespace Npgsql.Tests; -public class SchemaTests : SyncOrAsyncTestBase +public class SchemaTests(SyncOrAsync syncOrAsync) : SyncOrAsyncTestBase(syncOrAsync) { [Test] public async Task MetaDataCollections() @@ -47,7 +47,7 @@ public async Task No_parameter() Assert.That(collections1, Is.EquivalentTo(collections2)); } - [Test, Description("Calling GetSchema(collectionName [, restrictions]) case insensive collectionName can be used")] + [Test, Description("Calling GetSchema(collectionName [, restrictions]) case insensitive collectionName can be used")] public async Task Case_insensitive_collection_name() { await using var conn = await OpenConnectionAsync(); @@ -224,12 +224,40 @@ public async Task ReservedWords() Assert.That(reservedWords.Rows, Has.Count.GreaterThan(0)); } + [Test] + public async Task Databases() + { + await using var conn = await OpenConnectionAsync(); + var 
database = await conn.ExecuteScalarAsync("SELECT current_database()"); + + var dataTable = await GetSchema(conn, "Databases"); + var databases = dataTable.Rows + .Cast() + .Select(r => (string)r["database_name"]) + .ToList(); + + Assert.That(databases, Does.Contain(database)); + } + + [Test] + public async Task Schemata() + { + await using var conn = await OpenConnectionAsync(); + var schema = await CreateTempSchema(conn); + + var dataTable = await GetSchema(conn, "Schemata"); + var row = dataTable.Rows.Cast().Single(r => (string)r["schema_name"] == schema); + + Assert.That(row["catalog_name"], Is.EqualTo(await conn.ExecuteScalarAsync("SELECT current_database()"))); + Assert.That(row["schema_owner"], Is.EqualTo(await conn.ExecuteScalarAsync("SELECT current_user"))); + } + [Test] public async Task ForeignKeys() { await using var conn = await OpenConnectionAsync(); var dt = await GetSchema(conn, "ForeignKeys"); - Assert.IsNotNull(dt); + Assert.That(dt, Is.Not.Null); } [Test] @@ -248,7 +276,7 @@ public async Task ParameterMarkerFormat() command.CommandText = $"SELECT * FROM {table} WHERE int=" + string.Format(parameterMarkerFormat, parameterName); command.Parameters.Add(new NpgsqlParameter(parameterName, 4)); await using var reader = await command.ExecuteReaderAsync(); - Assert.IsTrue(reader.Read()); + Assert.That(reader.Read()); } [Test] @@ -258,7 +286,7 @@ public async Task Precision_and_scale() var table = await CreateTempTable( conn, "explicit_both NUMERIC(10,2), explicit_precision NUMERIC(10), implicit_both NUMERIC, integer INTEGER, text TEXT"); - var dataTable = await GetSchema(conn, "Columns", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "Columns", [null, null, table]); var rows = dataTable.Rows.Cast().ToList(); var explicitBoth = rows.Single(r => (string)r["column_name"] == "explicit_both"); @@ -311,7 +339,7 @@ public async Task GetSchema_tables_with_restrictions() await using var conn = await OpenConnectionAsync(); var table = await 
CreateTempTable(conn, "bar INTEGER"); - var dt = await GetSchema(conn, "Tables", new[] { null, null, table }); + var dt = await GetSchema(conn, "Tables", [null, null, table]); foreach (var row in dt.Rows.OfType()) Assert.That(row["table_name"], Is.EqualTo(table)); } @@ -324,18 +352,31 @@ public async Task GetSchema_views_with_restrictions() await conn.ExecuteNonQueryAsync($"CREATE VIEW {view} AS SELECT 8 AS foo"); - var dt = await GetSchema(conn, "Views", new[] { null, null, view }); + var dt = await GetSchema(conn, "Views", [null, null, view]); foreach (var row in dt.Rows.OfType()) Assert.That(row["table_name"], Is.EqualTo(view)); } + [Test] + public async Task GetSchema_materialized_views_with_restrictions() + { + await using var conn = await OpenConnectionAsync(); + var viewName = await GetTempMaterializedViewName(conn); + + await conn.ExecuteNonQueryAsync($"CREATE MATERIALIZED VIEW {viewName} AS SELECT 8 AS foo"); + + var dt = await GetSchema(conn, "MaterializedViews", [null, viewName, null, null]); + foreach (var row in dt.Rows.OfType()) + Assert.That(row["table_name"], Is.EqualTo(viewName)); + } + [Test] public async Task Primary_key() { await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id INT PRIMARY KEY, f1 INT"); - var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", [null, null, table]); var column = dataTable.Rows.Cast().Single(); Assert.That(column["table_schema"], Is.EqualTo("public")); @@ -350,7 +391,7 @@ public async Task Primary_key_composite() await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id1 INT, id2 INT, f1 INT, PRIMARY KEY (id1, id2)"); - var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", [null, null, table]); var columns = dataTable.Rows.Cast().OrderBy(r => 
r["ordinal_number"]).ToList(); Assert.That(columns.All(r => r["table_schema"].Equals("public"))); @@ -369,7 +410,7 @@ public async Task Unique_constraint() var database = await conn.ExecuteScalarAsync("SELECT current_database()"); - var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", [null, null, table]); var columns = dataTable.Rows.Cast().ToList(); Assert.That(columns.All(r => r["constraint_catalog"].Equals(database))); @@ -380,11 +421,16 @@ public async Task Unique_constraint() Assert.That(columns.All(r => r["table_name"].Equals(table))); Assert.That(columns.All(r => r["constraint_type"].Equals("UNIQUE KEY"))); - Assert.That(columns[0]["column_name"], Is.EqualTo("f1")); - Assert.That(columns[0]["ordinal_number"], Is.EqualTo(1)); + Assert.That(columns.Count, Is.EqualTo(2)); - Assert.That(columns[1]["column_name"], Is.EqualTo("f2")); - Assert.That(columns[1]["ordinal_number"], Is.EqualTo(2)); + // Columns are not necessarily in the correct order + var firstColumn = columns.FirstOrDefault(x => (string)x["column_name"] == "f1")!; + Assert.That(firstColumn, Is.Not.Null); + Assert.That(firstColumn["ordinal_number"], Is.EqualTo(1)); + + var secondColumn = columns.FirstOrDefault(x => (string)x["column_name"] == "f2")!; + Assert.That(secondColumn, Is.Not.Null); + Assert.That(secondColumn["ordinal_number"], Is.EqualTo(2)); } [Test] @@ -402,7 +448,7 @@ await conn.ExecuteNonQueryAsync(@$" var database = await conn.ExecuteScalarAsync("SELECT current_database()"); - var dataTable = await GetSchema(conn, "INDEXES", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "INDEXES", [null, null, table]); var index = dataTable.Rows.Cast().Single(); Assert.That(index["table_schema"], Is.EqualTo("public")); @@ -410,7 +456,7 @@ await conn.ExecuteNonQueryAsync(@$" Assert.That(index["index_name"], Is.EqualTo(constraint)); Assert.That(index["type_desc"], Is.EqualTo("")); - 
string[] indexColumnRestrictions = { null!, null!, table }; + string[] indexColumnRestrictions = [null!, null!, table]; var dataTable2 = await GetSchema(conn, "INDEXCOLUMNS", indexColumnRestrictions); var columns = dataTable2.Rows.Cast().ToList(); @@ -424,7 +470,7 @@ await conn.ExecuteNonQueryAsync(@$" Assert.That(columns[0]["column_name"], Is.EqualTo("f1")); Assert.That(columns[1]["column_name"], Is.EqualTo("f2")); - string[] indexColumnRestrictions3 = { (string) database! , "public", table, constraint, "f1" }; + string[] indexColumnRestrictions3 = [(string) database! , "public", table, constraint, "f1"]; var dataTable3 = await GetSchema(conn, "INDEXCOLUMNS", indexColumnRestrictions3); var columns3 = dataTable3.Rows.Cast().ToList(); Assert.That(columns3.Count, Is.EqualTo(1)); @@ -487,7 +533,7 @@ vbit bit varying(5), cid cid"; var table = await CreateTempTable(conn, columnDefinition); - var columnsSchema = await GetSchema(conn, "Columns", new[] { null, null, table }); + var columnsSchema = await GetSchema(conn, "Columns", [null, null, table]); var columns = columnsSchema.Rows.Cast().ToList(); var dataTypes = await GetSchema(conn, DbMetaDataCollectionNames.DataTypes); @@ -508,7 +554,7 @@ await conn.ExecuteNonQueryAsync($@" CREATE TYPE {enumName} AS ENUM ('red', 'yellow', 'blue'); CREATE TABLE {table} (color {enumName});"); - var dataTable = await GetSchema(conn, "Columns", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "Columns", [null, null, table]); var row = dataTable.Rows.Cast().Single(); Assert.That(row["data_type"], Is.EqualTo(enumName)); } @@ -525,12 +571,18 @@ await conn.ExecuteNonQueryAsync($@" CREATE TYPE {schema}.{enumName} AS ENUM ('red', 'yellow', 'blue'); CREATE TABLE {table} (color {schema}.{enumName});"); - var dataTable = await GetSchema(conn, "Columns", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "Columns", [null, null, table]); var row = dataTable.Rows.Cast().Single(); Assert.That(row["data_type"], 
Is.EqualTo($"{schema}.{enumName}")); } - public SchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } + [Test] + public async Task SlimBuilder_introspection_without_unsupported_type_exceptions() + { + await using var dataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(() => GetSchema(conn, DbMetaDataCollectionNames.DataTypes), Throws.Nothing); + } // ReSharper disable MethodHasAsyncOverload async Task GetSchema(NpgsqlConnection conn) diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index a3240bde86..a0594fb971 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -1,5 +1,9 @@ -using System; +using System; +using System.IO; +using System.Runtime.InteropServices; using System.Security.Authentication; +using System.Security.Cryptography; +using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; using Npgsql.Properties; @@ -11,31 +15,27 @@ namespace Npgsql.Tests; public class SecurityTests : TestBase { [Test, Description("Establishes an SSL connection, assuming a self-signed server certificate")] - public void Basic_ssl() + public async Task Basic_ssl() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); - Assert.That(conn.IsSecure, Is.True); + csb.SslMode = SslMode.Require; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.IsSslEncrypted, Is.True); } [Test, Description("Default user must run with md5 password encryption")] - public void Default_user_uses_md5_password() + public async Task Default_user_uses_md5_password() { if (!IsOnBuildServer) Assert.Ignore("Only executed in CI"); - var csb = new 
NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + csb.SslMode = SslMode.Require; + }); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(conn.IsScram, Is.False); Assert.That(conn.IsScramPlus, Is.False); } @@ -59,13 +59,11 @@ public void Reject_self_signed_certificate([Values(SslMode.VerifyCA, SslMode.Ver [Test, Description("Makes sure that ssl_renegotiation_limit is always 0, renegotiation is buggy")] public void No_ssl_renegotiation() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + csb.SslMode = SslMode.Require; + }); + using var conn = dataSource.OpenConnection(); Assert.That(conn.ExecuteScalar("SHOW ssl_renegotiation_limit"), Is.EqualTo("0")); conn.ExecuteNonQuery("DISCARD ALL"); Assert.That(conn.ExecuteScalar("SHOW ssl_renegotiation_limit"), Is.EqualTo("0")); @@ -74,12 +72,9 @@ public void No_ssl_renegotiation() [Test, Description("Makes sure that when SSL is disabled IsSecure returns false")] public void IsSecure_without_ssl() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Disable - }; - using var conn = OpenConnection(csb); - Assert.That(conn.IsSecure, Is.False); + using var dataSource = CreateDataSource(csb => csb.SslMode = SslMode.Disable); + using var conn = dataSource.OpenConnection(); + Assert.That(conn.IsSslEncrypted, Is.False); } [Test, Explicit("Needs to be set up (and run with with Kerberos credentials on Linux)")] @@ -89,8 +84,8 @@ public void IntegratedSecurity_with_Username() if (username == null) throw new Exception("Could find username"); - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - IntegratedSecurity = true, 
+ var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + { Username = username, Password = null }.ToString(); @@ -113,7 +108,6 @@ public void IntegratedSecurity_without_Username() { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - IntegratedSecurity = true, Username = null, Password = null }.ToString(); @@ -136,7 +130,6 @@ public void Connection_database_is_populated_on_Open() { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - IntegratedSecurity = true, Username = null, Password = null, Database = null @@ -159,13 +152,12 @@ public void Connection_database_is_populated_on_Open() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1718")] public void Bug1718() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + csb.SslMode = SslMode.Require; + }); + using var conn = dataSource.OpenConnection(); + using var tx = conn.BeginTransaction(); using var cmd = CreateSleepCommand(conn, 10000); var cts = new CancellationTokenSource(1000).Token; Assert.That(async () => await cmd.ExecuteNonQueryAsync(cts), Throws.Exception @@ -177,22 +169,26 @@ public void Bug1718() [Test] public void ScramPlus() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Require, - Username = "npgsql_tests_scram", - Password = "npgsql_tests_scram", - TrustServerCertificate = true - }; - try { - using var conn = OpenConnection(csb); - // scram-sha-256-plus only works begining from PostgreSQL 11 + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Require; + csb.Username = "npgsql_tests_scram"; + csb.Password = "npgsql_tests_scram"; + }); + using var conn = dataSource.OpenConnection(); + // scram-sha-256-plus only works beginning from PostgreSQL 11 if (conn.PostgreSqlVersion.Major >= 11) { + 
Assert.That(conn.IsScram, Is.False); Assert.That(conn.IsScramPlus, Is.True); } + else + { + Assert.That(conn.IsScram, Is.True); + Assert.That(conn.IsScramPlus, Is.False); + } } catch (Exception e) when (!IsOnBuildServer) { @@ -202,83 +198,83 @@ public void ScramPlus() } [Test] - public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, [Values] bool keepAlive) + public void ScramPlus_channel_binding([Values] ChannelBinding channelBinding) { - if (multiplexing && keepAlive) - { - Assert.Ignore("Multiplexing doesn't support keepalive"); - } - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Allow, - Username = "npgsql_tests_ssl", - Password = "npgsql_tests_ssl", - Multiplexing = multiplexing, - KeepAlive = keepAlive ? 10 : 0 - }; - try { - await using var conn = await OpenConnectionAsync(csb); - Assert.IsTrue(conn.IsSecure); + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Require; + csb.Username = "npgsql_tests_scram"; + csb.Password = "npgsql_tests_scram"; + csb.ChannelBinding = channelBinding; + }); + // scram-sha-256-plus only works beginning from PostgreSQL 11 + MinimumPgVersion(dataSource, "11.0"); + using var conn = dataSource.OpenConnection(); + + if (channelBinding == ChannelBinding.Disable) + { + Assert.That(conn.IsScram, Is.True); + Assert.That(conn.IsScramPlus, Is.False); + } + else + { + Assert.That(conn.IsScram, Is.False); + Assert.That(conn.IsScramPlus, Is.True); + } } catch (Exception e) when (!IsOnBuildServer) { Console.WriteLine(e); - Assert.Ignore("Only ssl user doesn't seem to be set up"); + Assert.Ignore("scram-sha-256-plus doesn't seem to be set up"); } } [Test] - public void SslMode_Require_throws_without_TSC() + public async Task Connect_with_only_ssl_allowed_user([Values] bool keepAlive) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + try { - SslMode = SslMode.Require - }; - - var ex = Assert.ThrowsAsync(async () => await 
OpenConnectionAsync(csb))!; - Assert.That(ex.Message, Is.EqualTo(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate)); - } - - [Test] - public async Task SslMode_Require_with_callback_without_TSC() - { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Allow; + csb.Username = "npgsql_tests_ssl"; + csb.Password = "npgsql_tests_ssl"; + csb.KeepAlive = keepAlive ? 10 : 0; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.IsSslEncrypted); + } + catch (Exception e) when (!IsOnBuildServer) { - SslMode = SslMode.Require, - TrustServerCertificate = false, - Pooling = false - }; - - using var connection = CreateConnection(csb.ToString()); - connection.UserCertificateValidationCallback = (_, _, _, _) => true; - - await connection.OpenAsync(); + Console.WriteLine(e); + Assert.Ignore("Only ssl user doesn't seem to be set up"); + } } [Test] - public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplexing, [Values] bool keepAlive) + [Platform(Exclude = "Win", Reason = "Postgresql doesn't close connection correctly on windows which might result in missing error message")] + public async Task Connect_with_only_non_ssl_allowed_user([Values] bool keepAlive) { - if (multiplexing && keepAlive) + try { - Assert.Ignore("Multiplexing doesn't support keepalive"); + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Prefer; + csb.Username = "npgsql_tests_nossl"; + csb.Password = "npgsql_tests_nossl"; + csb.KeepAlive = keepAlive ? 
10 : 0; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.IsSslEncrypted, Is.False); } - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + catch (NpgsqlException ex) when (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && ex.InnerException is IOException) { - SslMode = SslMode.Prefer, - Username = "npgsql_tests_nossl", - Password = "npgsql_tests_nossl", - Multiplexing = multiplexing, - KeepAlive = keepAlive ? 10 : 0 - }; - - try - { - await using var conn = await OpenConnectionAsync(csb); - Assert.IsFalse(conn.IsSecure); + // Windows server to windows client invites races that can cause the socket to be reset before all data can be read. + // https://www.postgresql.org/message-id/flat/90b34057-4176-7bb0-0dbb-9822a5f6425b%40greiz-reinsdorf.de + // https://www.postgresql.org/message-id/flat/16678-253e48d34dc0c376@postgresql.org + Assert.Ignore(); } catch (Exception e) when (!IsOnBuildServer) { @@ -288,16 +284,19 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex } [Test] - public async Task DataSource_UserCertificateValidationCallback_is_invoked([Values] bool acceptCertificate) + public async Task DataSource_SslClientAuthenticationOptionsCallback_is_invoked([Values] bool acceptCertificate) { var callbackWasInvoked = false; var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.ConnectionStringBuilder.SslMode = SslMode.Require; - dataSourceBuilder.UseUserCertificateValidationCallback((_, _, _, _) => + dataSourceBuilder.UseSslClientAuthenticationOptionsCallback(options => { - callbackWasInvoked = true; - return acceptCertificate; + options.RemoteCertificateValidationCallback = (_, _, _, _) => + { + callbackWasInvoked = true; + return acceptCertificate; + }; }); await using var dataSource = dataSourceBuilder.Build(); await using var connection = dataSource.CreateConnection(); @@ -314,7 +313,7 @@ public async Task 
DataSource_UserCertificateValidationCallback_is_invoked([Value } [Test] - public async Task Connection_UserCertificateValidationCallback_is_invoked([Values] bool acceptCertificate) + public async Task Connection_SslClientAuthenticationOptionsCallback_is_invoked([Values] bool acceptCertificate) { var callbackWasInvoked = false; @@ -322,10 +321,13 @@ public async Task Connection_UserCertificateValidationCallback_is_invoked([Value dataSourceBuilder.ConnectionStringBuilder.SslMode = SslMode.Require; await using var dataSource = dataSourceBuilder.Build(); await using var connection = dataSource.CreateConnection(); - connection.UserCertificateValidationCallback = (_, _, _, _) => + connection.SslClientAuthenticationOptionsCallback = options => { - callbackWasInvoked = true; - return acceptCertificate; + options.RemoteCertificateValidationCallback = (_, _, _, _) => + { + callbackWasInvoked = true; + return acceptCertificate; + }; }; if (acceptCertificate) @@ -342,54 +344,53 @@ public async Task Connection_UserCertificateValidationCallback_is_invoked([Value [Test] public void Connect_with_Verify_and_callback_throws([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => csb.SslMode = sslMode); + using var connection = dataSource.CreateConnection(); + connection.SslClientAuthenticationOptionsCallback = options => { - SslMode = sslMode + options.RemoteCertificateValidationCallback = (_, _, _, _) => true; }; - var connection = CreateConnection(csb.ToString()); - connection.UserCertificateValidationCallback = (_, _, _, _) => true; - var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync())!; - Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode))); + Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslVerifyWithCustomValidationCallback, sslMode))); } [Test] public void 
Connect_with_RootCertificate_and_callback_throws() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - RootCertificate = "foo" + csb.SslMode = SslMode.Require; + csb.RootCertificate = "foo"; + }); + using var connection = dataSource.CreateConnection(); + connection.SslClientAuthenticationOptionsCallback = options => + { + options.RemoteCertificateValidationCallback = (_, _, _, _) => true; }; - var connection = CreateConnection(csb.ToString()); - connection.UserCertificateValidationCallback = (_, _, _, _) => true; - var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync())!; - Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback))); + Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithCustomValidationCallback))); } [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/4305")] public async Task Bug4305_Secure([Values] bool async) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - Username = "npgsql_tests_ssl", - Password = "npgsql_tests_ssl", - MaxPoolSize = 1, - TrustServerCertificate = true - }; - using var _ = CreateTempPool(csb, out var connString); + csb.SslMode = SslMode.Require; + csb.Username = "npgsql_tests_ssl"; + csb.Password = "npgsql_tests_ssl"; + csb.MaxPoolSize = 1; + }); NpgsqlConnection conn = default!; try { - conn = await OpenConnectionAsync(connString); - Assert.IsTrue(conn.IsSecure); + conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.IsSslEncrypted); } catch (Exception e) when (!IsOnBuildServer) { @@ -398,20 +399,23 @@ public async Task Bug4305_Secure([Values] bool async) } await using var __ = conn; - var originalConnector = conn.Connector; - await using var cmd = conn.CreateCommand(); - cmd.CommandText = "select 
pg_sleep(30)"; - cmd.CommandTimeout = 3; - var ex = async - ? Assert.ThrowsAsync(() => cmd.ExecuteNonQueryAsync())! - : Assert.Throws(() => cmd.ExecuteNonQuery())!; - Assert.That(ex.InnerException, Is.TypeOf()); + await using (var tx = await conn.BeginTransactionAsync()) + { + var originalConnector = conn.Connector; - await conn.CloseAsync(); - await conn.OpenAsync(); + cmd.CommandText = "select pg_sleep(30)"; + cmd.CommandTimeout = 3; + var ex = async + ? Assert.ThrowsAsync(() => cmd.ExecuteNonQueryAsync())! + : Assert.Throws(() => cmd.ExecuteNonQuery())!; + Assert.That(ex.InnerException, Is.TypeOf()); - Assert.AreSame(originalConnector, conn.Connector); + await conn.CloseAsync(); + await conn.OpenAsync(); + + Assert.That(conn.Connector, Is.SameAs(originalConnector)); + } cmd.CommandText = "SELECT 1"; if (async) @@ -424,21 +428,20 @@ public async Task Bug4305_Secure([Values] bool async) [IssueLink("https://github.com/npgsql/npgsql/issues/4305")] public async Task Bug4305_not_Secure([Values] bool async) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Disable, - Username = "npgsql_tests_nossl", - Password = "npgsql_tests_nossl", - MaxPoolSize = 1 - }; - using var _ = CreateTempPool(csb, out var connString); + csb.SslMode = SslMode.Disable; + csb.Username = "npgsql_tests_nossl"; + csb.Password = "npgsql_tests_nossl"; + csb.MaxPoolSize = 1; + }); NpgsqlConnection conn = default!; try { - conn = await OpenConnectionAsync(connString); - Assert.IsFalse(conn.IsSecure); + conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.IsSslEncrypted, Is.False); } catch (Exception e) when (!IsOnBuildServer) { @@ -460,7 +463,7 @@ public async Task Bug4305_not_Secure([Values] bool async) await conn.CloseAsync(); await conn.OpenAsync(); - Assert.AreSame(originalConnector, conn.Connector); + Assert.That(conn.Connector, Is.SameAs(originalConnector)); cmd.CommandText = "SELECT 1"; if 
(async) @@ -469,6 +472,149 @@ public async Task Bug4305_not_Secure([Values] bool async) Assert.DoesNotThrow(() => cmd.ExecuteNonQuery()); } + [Test] + public async Task Direct_ssl_negotiation() + { + await using var adminConn = await OpenConnectionAsync(); + MinimumPgVersion(adminConn, "17.0"); + + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Require; + csb.SslNegotiation = SslNegotiation.Direct; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.IsSslEncrypted); + } + + [Test] + public void Direct_ssl_requires_correct_sslmode([Values] SslMode sslMode) + { + if (sslMode is SslMode.Disable or SslMode.Allow or SslMode.Prefer) + { + var ex = Assert.Throws(() => + { + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + csb.SslNegotiation = SslNegotiation.Direct; + }); + })!; + Assert.That(ex.Message, Is.EqualTo("SSL Mode has to be Require or higher to be used with direct SSL Negotiation")); + } + else + { + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + csb.SslNegotiation = SslNegotiation.Direct; + }); + } + } + + [Test] + [Platform(Exclude = "MacOsX", Reason = "Mac requires explicit opt-in to receive CA certificate in TLS handshake")] + public async Task Connect_with_verify_and_ca_cert([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode) + { + if (!IsOnBuildServer) + Assert.Ignore("Only executed in CI"); + + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + csb.RootCertificate = "ca.crt"; + }); + + await using var _ = await dataSource.OpenConnectionAsync(); + } + + [Test] + [Platform(Exclude = "MacOsX", Reason = "Mac requires explicit opt-in to receive CA certificate in TLS handshake")] + public async Task Connect_with_verify_check_host([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode) + { + if (!IsOnBuildServer) + Assert.Ignore("Only executed in CI"); + + await using var 
dataSource = CreateDataSource(csb => + { + csb.Host = "127.0.0.1"; + csb.SslMode = sslMode; + csb.RootCertificate = "ca.crt"; + }); + + if (sslMode == SslMode.VerifyCA) + { + await using var _ = await dataSource.OpenConnectionAsync(); + } + else + { + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.InnerException, Is.TypeOf()); + } + } + + [Test] + [Platform(Exclude = "MacOsX", Reason = "Mac requires explicit opt-in to receive CA certificate in TLS handshake")] + public async Task Connect_with_verify_and_multiple_ca_cert([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode, [Values] bool realCaFirst) + { + if (!IsOnBuildServer) + Assert.Ignore("Only executed in CI"); + + var certificates = new X509Certificate2Collection(); + + using var realCaCert = X509CertificateLoader.LoadCertificateFromFile("ca.crt"); + + using var ecdsa = ECDsa.Create(); + var req = new CertificateRequest("cn=localhost", ecdsa, HashAlgorithmName.SHA256); + using var unrelatedCaCert = req.CreateSelfSigned(DateTimeOffset.UtcNow.AddDays(-1), DateTimeOffset.UtcNow.AddDays(1)); + + if (realCaFirst) + { + certificates.Add(realCaCert); + certificates.Add(unrelatedCaCert); + } + else + { + certificates.Add(unrelatedCaCert); + certificates.Add(realCaCert); + } + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.SslMode = sslMode; + dataSourceBuilder.UseRootCertificates(certificates); + + await using var dataSource = dataSourceBuilder.Build(); + + await using var _ = await dataSource.OpenConnectionAsync(); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Direct_ssl_via_env_requires_correct_sslmode() + { + await using var adminConn = await OpenConnectionAsync(); + MinimumPgVersion(adminConn, "17.0"); + + // NonParallelizable attribute doesn't work with parameters that well + foreach (var sslMode in new[] { SslMode.Disable, SslMode.Allow, SslMode.Prefer, 
SslMode.Require }) + { + using var _ = SetEnvironmentVariable("PGSSLNEGOTIATION", nameof(SslNegotiation.Direct)); + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + }); + if (sslMode is SslMode.Disable or SslMode.Allow or SslMode.Prefer) + { + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.Message, Is.EqualTo("SSL Mode has to be Require or higher to be used with direct SSL Negotiation")); + } + else + { + await using var conn = await dataSource.OpenConnectionAsync(); + } + } + } + #region Setup / Teardown / Utils [OneTimeSetUp] diff --git a/test/Npgsql.Tests/SizeTests.cs b/test/Npgsql.Tests/SizeTests.cs new file mode 100644 index 0000000000..93bd3b8d29 --- /dev/null +++ b/test/Npgsql.Tests/SizeTests.cs @@ -0,0 +1,59 @@ +using System; +using NUnit.Framework; +using Npgsql.Internal; + +namespace Npgsql.Tests; + +public class SizeTests +{ + [Test] + public void UnknownKind() => Assert.That(Size.Unknown.Kind, Is.EqualTo(SizeKind.Unknown)); + + [Test] + public void UnknownThrowsOnValue() => Assert.Throws(() => _ = Size.Unknown.Value); + + [Test] + public void Exact() + { + Assert.That(Size.Create(1).Value, Is.EqualTo(1)); + Assert.That(Size.Create(1).Kind, Is.EqualTo(SizeKind.Exact)); + } + + [Test] + public void ZeroIsExactKind() => Assert.That(Size.Zero.Kind, Is.EqualTo(SizeKind.Exact)); + + [Test] + public void UpperBound() + { + Assert.That(Size.CreateUpperBound(1).Value, Is.EqualTo(1)); + Assert.That(Size.CreateUpperBound(1).Kind, Is.EqualTo(SizeKind.UpperBound)); + } + + [Test] + public void CombineThrowsOnOverflow() => Assert.Throws(() => Size.Create(1).Combine(int.MaxValue)); + + [Test] + public void CombineExactWorks() => Assert.That(Size.Create(1).Combine(1), Is.EqualTo(Size.Create(2))); + + [Test] + public void CombineUpperBoundWorks() => Assert.That(Size.CreateUpperBound(1).Combine(1), Is.EqualTo(Size.CreateUpperBound(2))); + + [Test] + public void 
CombineUnknownWithAnyGivesUnknown() + { + Assert.That(Size.Unknown.Combine(Size.Unknown), Is.EqualTo(Size.Unknown)); + + Assert.That(Size.Create(1).Combine(Size.Unknown), Is.EqualTo(Size.Unknown)); + Assert.That(Size.Unknown.Combine(Size.Create(1)), Is.EqualTo(Size.Unknown)); + + Assert.That(Size.Unknown.Combine(Size.CreateUpperBound(1)), Is.EqualTo(Size.Unknown)); + Assert.That(Size.CreateUpperBound(1).Combine(Size.Unknown), Is.EqualTo(Size.Unknown)); + } + + [Test] + public void CombineUpperBoundWithExactGivesUpperBound() + { + Assert.That(Size.Create(1).Combine(Size.CreateUpperBound(1)), Is.EqualTo(Size.CreateUpperBound(2))); + Assert.That(Size.CreateUpperBound(1).Combine(Size.Create(1)), Is.EqualTo(Size.CreateUpperBound(2))); + } +} diff --git a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs index 7a43f70ad1..0385e2a877 100644 --- a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs +++ b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs @@ -1,52 +1,74 @@ -using System.Collections.Generic; +using System.Collections.Generic; +using System.Globalization; using System.Linq; using Npgsql.NameTranslation; using NUnit.Framework; namespace Npgsql.Tests; -[TestFixture] public class SnakeCaseNameTranslatorTests { + static readonly CultureInfo trTRCulture = new("tr-TR"); + static readonly CultureInfo enUSCulture = new("en-US"); + [Test, TestCaseSource(typeof(SnakeCaseNameTranslatorTests), nameof(TestCases))] - public string TranslateTypeName(string value, bool legacyMode) - => new NpgsqlSnakeCaseNameTranslator(legacyMode).TranslateTypeName(value); + public string TranslateTypeName(CultureInfo? 
culture, string value, bool legacyMode) + => new NpgsqlSnakeCaseNameTranslator(legacyMode, culture).TranslateTypeName(value); [Test, TestCaseSource(typeof(SnakeCaseNameTranslatorTests), nameof(TestCases))] - public string TranslateMemberName(string value, bool legacyMode) - => new NpgsqlSnakeCaseNameTranslator(legacyMode).TranslateMemberName(value); + public string TranslateMemberName(CultureInfo? culture, string value, bool legacyMode) + => new NpgsqlSnakeCaseNameTranslator(legacyMode, culture).TranslateMemberName(value); - static IEnumerable TestCases => new (string value, string legacyResult, string result)[] + static IEnumerable TestCases => new (CultureInfo? culture, string value, string legacyResult, string result)[] { - ("Hi!! This is text. Time to test.", "hi!! _this is text. _time to test.", "hi_this_is_text_time_to_test"), - ("9999-12-31T23:59:59.9999999Z", "9999-12-31_t23:59:59.9999999_z", "9999_12_31t23_59_59_9999999z"), - ("FK_post_simple_blog_BlogId", "f_k_post_simple_blog__blog_id", "fk_post_simple_blog_blog_id"), - ("already_snake_case_ ", "already_snake_case_ ", "already_snake_case_"), - ("SHOUTING_CASE", "s_h_o_u_t_i_n_g__c_a_s_e", "shouting_case"), - ("IsJSONProperty", "is_j_s_o_n_property", "is_json_property"), - ("SnA__ kEcAsE", "sn_a__ k_ec_as_e", "sn_a__k_ec_as_e"), - ("SnA__kEcAsE", "sn_a__k_ec_as_e", "sn_a__k_ec_as_e"), - ("SnAkEcAsE", "sn_ak_ec_as_e", "sn_ak_ec_as_e"), - ("URLValue", "u_r_l_value", "url_value"), - ("Xml2Json", "xml2_json", "xml2json"), - (" IPhone ", " _i_phone ", "i_phone"), - ("I Phone", "i _phone", "i_phone"), - (" IPhone", " _i_phone", "i_phone"), - ("I Phone", "i _phone", "i_phone"), - ("IPhone", "i_phone", "i_phone"), - ("iPhone", "i_phone", "i_phone"), - ("IsCIA", "is_c_i_a", "is_cia"), - ("Person", "person", "person"), - ("ABC123", "a_b_c123", "abc123"), - ("VmQ", "vm_q", "vm_q"), - ("URL", "u_r_l", "url"), - ("AB1", "a_b1", "ab1"), - ("ID", "i_d", "id"), - ("I", "i", "i"), - ("", "", "") + (null, "Hi!! 
This is text. Time to test.", "hi!! _this is text. _time to test.", "hi_this_is_text_time_to_test"), + (null, "9999-12-31T23:59:59.9999999Z", "9999-12-31_t23:59:59.9999999_z", "9999_12_31t23_59_59_9999999z"), + (null, "FK_post_simple_blog_BlogId", "f_k_post_simple_blog__blog_id", "fk_post_simple_blog_blog_id"), + (null, "already_snake_case_ ", "already_snake_case_ ", "already_snake_case_"), + (null, "SHOUTING_CASE", "s_h_o_u_t_i_n_g__c_a_s_e", "shouting_case"), + (null, "IsJSONProperty", "is_j_s_o_n_property", "is_json_property"), + (null, "SnA__ kEcAsE", "sn_a__ k_ec_as_e", "sn_a__k_ec_as_e"), + (null, "SnA__kEcAsE", "sn_a__k_ec_as_e", "sn_a__k_ec_as_e"), + (null, "SnAkEcAsE", "sn_ak_ec_as_e", "sn_ak_ec_as_e"), + (null, "URLValue", "u_r_l_value", "url_value"), + (null, "Xml2Json", "xml2_json", "xml2json"), + (null, " IPhone ", " _i_phone ", "i_phone"), + (null, "I Phone", "i _phone", "i_phone"), + (null, " IPhone", " _i_phone", "i_phone"), + (null, "I Phone", "i _phone", "i_phone"), + (null, "IPhone", "i_phone", "i_phone"), + (null, "iPhone", "i_phone", "i_phone"), + (null, "IsCIA", "is_c_i_a", "is_cia"), + (null, "Person", "person", "person"), + (null, "ABC123", "a_b_c123", "abc123"), + (null, "VmQ", "vm_q", "vm_q"), + (null, "URL", "u_r_l", "url"), + (null, "AB1", "a_b1", "ab1"), + (null, "ID", "i_d", "id"), + (null, "I", "i", "i"), + (null, "", "", ""), + (trTRCulture, "IPhone", "ı_phone", "ı_phone"), // dotless I -> dotless ı + (enUSCulture, "IPhone", "i_phone", "i_phone"), + (CultureInfo.InvariantCulture, "IPhone", "i_phone", "i_phone"), }.SelectMany(x => new[] { - new TestCaseData(x.value, true).Returns(x.legacyResult), - new TestCaseData(x.value, false).Returns(x.result), + new TestCaseData(x.culture, x.value, true).Returns(x.legacyResult), + new TestCaseData(x.culture, x.value, false).Returns(x.result), }); -} \ No newline at end of file + + [Test, Description("Checks translating a name with letter 'I' in Turkish locale with default setting (Invariant 
Culture)")] + [SetCulture("tr-TR")] + public void TurkeyTest() + { + var translator = new NpgsqlSnakeCaseNameTranslator(); + var legacyTranslator = new NpgsqlSnakeCaseNameTranslator(true); + + const string clrName = "IPhone"; + const string expected = "i_phone"; + + Assert.That(translator.TranslateMemberName(clrName), Is.EqualTo(expected)); + Assert.That(translator.TranslateTypeName(clrName), Is.EqualTo(expected)); + Assert.That(legacyTranslator.TranslateMemberName(clrName), Is.EqualTo(expected)); + Assert.That(legacyTranslator.TranslateTypeName(clrName), Is.EqualTo(expected)); + } +} diff --git a/test/Npgsql.Tests/SqlQueryParserTests.cs b/test/Npgsql.Tests/SqlQueryParserTests.cs index d161823114..c01c155fd4 100644 --- a/test/Npgsql.Tests/SqlQueryParserTests.cs +++ b/test/Npgsql.Tests/SqlQueryParserTests.cs @@ -1,4 +1,3 @@ -using System; using System.Collections.Generic; using System.Data; using System.Linq; diff --git a/test/Npgsql.Tests/StoredProcedureTests.cs b/test/Npgsql.Tests/StoredProcedureTests.cs index a77a6c8d85..ae13fa015c 100644 --- a/test/Npgsql.Tests/StoredProcedureTests.cs +++ b/test/Npgsql.Tests/StoredProcedureTests.cs @@ -15,17 +15,17 @@ public class StoredProcedureTests : TestBase [TestCase(true, true)] public async Task With_input_parameters(bool withPositional, bool withNamed) { - var table = await CreateTempTable(SharedDataSource, "foo int, bar int"); - var sproc = await GetTempProcedureName(SharedDataSource); + var table = await CreateTempTable(DataSource, "foo int, bar int"); + var sproc = await GetTempProcedureName(DataSource); - await SharedDataSource.ExecuteNonQueryAsync(@$" + await DataSource.ExecuteNonQueryAsync(@$" CREATE PROCEDURE {sproc}(a int, b int) LANGUAGE SQL AS $$ INSERT INTO {table} VALUES (a, b); $$"); - await using (var command = SharedDataSource.CreateCommand(sproc)) + await using (var command = DataSource.CreateCommand(sproc)) { command.CommandType = CommandType.StoredProcedure; @@ -40,7 +40,7 @@ LANGUAGE SQL await 
command.ExecuteNonQueryAsync(); } - await using (var command = SharedDataSource.CreateCommand($"SELECT * FROM {table}")) + await using (var command = DataSource.CreateCommand($"SELECT * FROM {table}")) await using (var reader = await command.ExecuteReaderAsync()) { await reader.ReadAsync(); @@ -55,11 +55,11 @@ LANGUAGE SQL [TestCase(true, true)] public async Task With_output_parameters(bool withPositional, bool withNamed) { - MinimumPgVersion(SharedDataSource, "14.0", "Stored procedure OUT parameters are only support starting with version 14"); + MinimumPgVersion(DataSource, "14.0", "Stored procedure OUT parameters are only support starting with version 14"); - var sproc = await GetTempProcedureName(SharedDataSource); + var sproc = await GetTempProcedureName(DataSource); - await SharedDataSource.ExecuteNonQueryAsync(@$" + await DataSource.ExecuteNonQueryAsync(@$" CREATE PROCEDURE {sproc}(a int, OUT out1 int, OUT out2 int, b int) LANGUAGE plpgsql AS $$ @@ -68,7 +68,7 @@ LANGUAGE plpgsql out2 = b; END$$"); - await using var command = SharedDataSource.CreateCommand(sproc); + await using var command = DataSource.CreateCommand(sproc); command.CommandType = CommandType.StoredProcedure; command.Parameters.Add(new() { Value = 8 }); @@ -96,9 +96,9 @@ LANGUAGE plpgsql [TestCase(true, true)] public async Task With_input_output_parameters(bool withPositional, bool withNamed) { - var sproc = await GetTempProcedureName(SharedDataSource); + var sproc = await GetTempProcedureName(DataSource); - await SharedDataSource.ExecuteNonQueryAsync(@$" + await DataSource.ExecuteNonQueryAsync(@$" CREATE PROCEDURE {sproc}(a int, INOUT inout1 int, INOUT inout2 int, b int) LANGUAGE plpgsql AS $$ @@ -107,7 +107,7 @@ LANGUAGE plpgsql inout2 = inout2 + b; END$$"); - await using var command = SharedDataSource.CreateCommand(sproc); + await using var command = DataSource.CreateCommand(sproc); command.CommandType = CommandType.StoredProcedure; command.Parameters.Add(new() { Value = 8 }); @@ -129,6 
+129,88 @@ LANGUAGE plpgsql Assert.That(reader[1], Is.EqualTo(11)); } + [Test] + public async Task Batch_positional_parameters_works() + { + var tempname = await GetTempProcedureName(DataSource); + await using var connection = await DataSource.OpenConnectionAsync(); + await using var transaction = await connection.BeginTransactionAsync(IsolationLevel.Serializable); + await using var batch = new NpgsqlBatch(connection, transaction) + { + BatchCommands = + { + new(tempname) + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { DbType = DbType.Int64, Direction = ParameterDirection.Output } + } + }, + new ("COMMIT") + } + }; + + Assert.ThrowsAsync(() => batch.ExecuteNonQueryAsync()); + } + + [Test] + public async Task Batch_StoredProcedure_output_parameters_works() + { + // Proper OUT params were introduced in PostgreSQL 14 + MinimumPgVersion(DataSource, "14.0", "Stored procedure OUT parameters are only support starting with version 14"); + var sproc = await GetTempProcedureName(DataSource); + + await using var connection = await DataSource.OpenConnectionAsync(); + await using var transaction = await connection.BeginTransactionAsync(IsolationLevel.Serializable); + var c = connection.CreateCommand(); + c.CommandText = $""" + CREATE OR REPLACE PROCEDURE {sproc} + ( + p_username TEXT, + OUT p_user_id BIGINT + ) + LANGUAGE plpgsql + AS $$ + BEGIN + p_user_id = 1; + return; + END; + $$; + """; + await c.ExecuteNonQueryAsync(); + + await using var batch = new NpgsqlBatch(connection, transaction) + { + BatchCommands = + { + new(sproc) + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { NpgsqlDbType = NpgsqlDbType.Bigint, Direction = ParameterDirection.Output } + } + }, + new(sproc) + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { NpgsqlDbType = NpgsqlDbType.Bigint, Direction = ParameterDirection.Output } + } + } + } + }; + + 
await batch.ExecuteNonQueryAsync(); + Assert.That(batch.BatchCommands[0].Parameters[1].Value, Is.EqualTo(1)); + Assert.That(batch.BatchCommands[1].Parameters[1].Value, Is.EqualTo(1)); + } + #region DeriveParameters [Test, Description("Tests function parameter derivation with IN, OUT and INOUT parameters")] @@ -213,8 +295,8 @@ public async Task DeriveParameters_procedure_with_case_sensitive_name() { await using var command = new NpgsqlCommand(@"""ProcedureCaseSensitive""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -233,8 +315,8 @@ public async Task DeriveParameters_quote_characters_in_function_name() { await using var command = new NpgsqlCommand(sproc, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -253,8 +335,8 @@ await conn.ExecuteNonQueryAsync( { await using var command = new NpgsqlCommand(@"""My.Dotted.Procedure""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, 
Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -273,8 +355,8 @@ await conn.ExecuteNonQueryAsync( $"CREATE PROCEDURE {sproc}(x int, y int, out sum int, out product int) AS 'SELECT $1 + $2, $1 * $2' LANGUAGE sql"); await using var command = new NpgsqlCommand(sproc, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual("x", command.Parameters[0].ParameterName); - Assert.AreEqual("y", command.Parameters[1].ParameterName); + Assert.That(command.Parameters[0].ParameterName, Is.EqualTo("x")); + Assert.That(command.Parameters[1].ParameterName, Is.EqualTo("y")); } [Test] diff --git a/test/Npgsql.Tests/Support/AssemblySetUp.cs b/test/Npgsql.Tests/Support/AssemblySetUp.cs index 851e452acb..9c0e4d1789 100644 --- a/test/Npgsql.Tests/Support/AssemblySetUp.cs +++ b/test/Npgsql.Tests/Support/AssemblySetUp.cs @@ -1,7 +1,5 @@ -using Microsoft.Extensions.Logging; using Npgsql; using Npgsql.Tests; -using Npgsql.Tests.Support; using NUnit.Framework; using System; using System.Threading; @@ -28,7 +26,6 @@ public void Setup() var builder = new NpgsqlConnectionStringBuilder(connString) { Pooling = false, - Multiplexing = false, Database = "postgres" }; diff --git a/test/Npgsql.Tests/Support/ListLoggerFactory.cs b/test/Npgsql.Tests/Support/ListLoggerFactory.cs index 2852335df8..98f94cb8fa 100644 --- a/test/Npgsql.Tests/Support/ListLoggerFactory.cs +++ b/test/Npgsql.Tests/Support/ListLoggerFactory.cs @@ -35,20 +35,15 @@ public void AddProvider(ILoggerProvider provider) public void Dispose() => StopRecording(); - class ListLogger : ILogger + class ListLogger(ListLoggerProvider provider) : ILogger { - readonly ListLoggerProvider _provider; - - public ListLogger(ListLoggerProvider provider) - => _provider = provider; - public List<(LogLevel, EventId, string, object?, Exception?)> LoggedEvents { get; } - = new(); + = []; public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? 
exception, Func formatter) { - if (_provider._recording) + if (provider._recording) { lock (this) { @@ -66,7 +61,7 @@ public void Clear() } } - public bool IsEnabled(LogLevel logLevel) => _provider._recording; + public bool IsEnabled(LogLevel logLevel) => provider._recording; public IDisposable BeginScope(TState state) where TState : notnull => new Scope(); @@ -79,14 +74,9 @@ public void Dispose() } } - class RecordingDisposable : IDisposable + class RecordingDisposable(ListLoggerProvider provider) : IDisposable { - readonly ListLoggerProvider _provider; - - public RecordingDisposable(ListLoggerProvider provider) - => _provider = provider; - public void Dispose() - => _provider.StopRecording(); + => provider.StopRecording(); } } diff --git a/test/Npgsql.Tests/Support/MultiplexingTestBase.cs b/test/Npgsql.Tests/Support/MultiplexingTestBase.cs deleted file mode 100644 index c7483390e0..0000000000 --- a/test/Npgsql.Tests/Support/MultiplexingTestBase.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System.Collections.Concurrent; -using NUnit.Framework; - -namespace Npgsql.Tests; - -[TestFixture(MultiplexingMode.NonMultiplexing)] -[TestFixture(MultiplexingMode.Multiplexing)] -public abstract class MultiplexingTestBase : TestBase -{ - protected bool IsMultiplexing => MultiplexingMode == MultiplexingMode.Multiplexing; - - protected MultiplexingMode MultiplexingMode { get; } - - readonly ConcurrentDictionary<(string ConnString, bool IsMultiplexing), string> _connStringCache - = new(); - - public override string ConnectionString { get; } - - protected MultiplexingTestBase(MultiplexingMode multiplexingMode) - { - MultiplexingMode = multiplexingMode; - - // If the test requires multiplexing to be on or off, use a small cache to avoid reparsing and - // regenerating the connection string every time - ConnectionString = _connStringCache.GetOrAdd((base.ConnectionString, IsMultiplexing), - tup => new NpgsqlConnectionStringBuilder(tup.ConnString) - { - Multiplexing = tup.IsMultiplexing - 
}.ToString()); - } -} - -public enum MultiplexingMode -{ - NonMultiplexing, - Multiplexing -} \ No newline at end of file diff --git a/test/Npgsql.Tests/Support/PgCancellationRequest.cs b/test/Npgsql.Tests/Support/PgCancellationRequest.cs index c07f606bb8..928f388bd0 100644 --- a/test/Npgsql.Tests/Support/PgCancellationRequest.cs +++ b/test/Npgsql.Tests/Support/PgCancellationRequest.cs @@ -1,37 +1,23 @@ -using System.IO; +using System.IO; using Npgsql.Internal; namespace Npgsql.Tests.Support; -class PgCancellationRequest +class PgCancellationRequest(NpgsqlReadBuffer readBuffer, NpgsqlWriteBuffer writeBuffer, Stream stream, int processId, int secret) { - readonly NpgsqlReadBuffer _readBuffer; - readonly NpgsqlWriteBuffer _writeBuffer; - readonly Stream _stream; - - public int ProcessId { get; } - public int Secret { get; } + public int ProcessId { get; } = processId; + public int Secret { get; } = secret; bool completed; - public PgCancellationRequest(NpgsqlReadBuffer readBuffer, NpgsqlWriteBuffer writeBuffer, Stream stream, int processId, int secret) - { - _readBuffer = readBuffer; - _writeBuffer = writeBuffer; - _stream = stream; - - ProcessId = processId; - Secret = secret; - } - public void Complete() { if (completed) return; - _readBuffer.Dispose(); - _writeBuffer.Dispose(); - _stream.Dispose(); + readBuffer.Dispose(); + writeBuffer.Dispose(); + stream.Dispose(); completed = true; } diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index 7cc33c1877..426a1519c8 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -7,7 +7,6 @@ using System.Threading.Channels; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.Util; namespace Npgsql.Tests.Support; @@ -17,18 +16,20 @@ class PgPostmasterMock : IAsyncDisposable const int WriteBufferSize = 8192; const int CancelRequestCode = 1234 << 16 | 5678; const int SslRequest = 80877103; + const 
int GssRequest = 80877104; - static readonly Encoding Encoding = PGUtil.UTF8Encoding; - static readonly Encoding RelaxedEncoding = PGUtil.RelaxedUTF8Encoding; + static readonly Encoding Encoding = NpgsqlWriteBuffer.UTF8Encoding; + static readonly Encoding RelaxedEncoding = NpgsqlWriteBuffer.RelaxedUTF8Encoding; readonly Socket _socket; - readonly List _allServers = new(); + readonly List _allServers = []; bool _acceptingClients; Task? _acceptClientsTask; int _processIdCounter; readonly bool _completeCancellationImmediately; readonly string? _startupErrorCode; + readonly bool _breakOnGssEncryptionRequest; ChannelWriter> _pendingRequestsWriter { get; } ChannelReader> _pendingRequestsReader { get; } @@ -49,9 +50,10 @@ internal static PgPostmasterMock Start( string? connectionString = null, bool completeCancellationImmediately = true, MockState state = MockState.MultipleHostsDisabled, - string? startupErrorCode = null) + string? startupErrorCode = null, + bool breakOnGssEncryptionRequest = false) { - var mock = new PgPostmasterMock(connectionString, completeCancellationImmediately, state, startupErrorCode); + var mock = new PgPostmasterMock(connectionString, completeCancellationImmediately, state, startupErrorCode, breakOnGssEncryptionRequest); mock.AcceptClients(); return mock; } @@ -60,7 +62,8 @@ internal PgPostmasterMock( string? connectionString = null, bool completeCancellationImmediately = true, MockState state = MockState.MultipleHostsDisabled, - string? startupErrorCode = null) + string? 
startupErrorCode = null, + bool breakOnGssEncryptionRequest = false) { var pendingRequestsChannel = Channel.CreateUnbounded>(); _pendingRequestsReader = pendingRequestsChannel.Reader; @@ -71,6 +74,7 @@ internal PgPostmasterMock( _completeCancellationImmediately = completeCancellationImmediately; State = state; _startupErrorCode = startupErrorCode; + _breakOnGssEncryptionRequest = breakOnGssEncryptionRequest; _socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); var endpoint = new IPEndPoint(IPAddress.Loopback, 0); @@ -81,17 +85,20 @@ internal PgPostmasterMock( Port = localEndPoint.Port; connectionStringBuilder.Host = Host; connectionStringBuilder.Port = Port; +#pragma warning disable CS0618 // Type or member is obsolete connectionStringBuilder.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading; +#pragma warning restore CS0618 // Type or member is obsolete ConnectionString = connectionStringBuilder.ConnectionString; _socket.Listen(5); } - public NpgsqlDataSourceBuilder GetDataSourceBuilder() - => new(ConnectionString); - - public NpgsqlDataSource CreateDataSource() - => NpgsqlDataSource.Create(ConnectionString); + public NpgsqlDataSource CreateDataSource(Action? 
configure = null) + { + var builder = new NpgsqlDataSourceBuilder(ConnectionString); + configure?.Invoke(builder); + return builder.Build(); + } void AcceptClients() { @@ -139,12 +146,32 @@ async Task Accept(bool completeCancellationImmediat var readBuffer = new NpgsqlReadBuffer(null!, stream, clientSocket, ReadBufferSize, Encoding, RelaxedEncoding); var writeBuffer = new NpgsqlWriteBuffer(null!, stream, clientSocket, WriteBufferSize, Encoding); + writeBuffer.MessageLengthValidation = false; await readBuffer.EnsureAsync(4); var len = readBuffer.ReadInt32(); await readBuffer.EnsureAsync(len - 4); var request = readBuffer.ReadInt32(); + if (request == GssRequest) + { + if (_breakOnGssEncryptionRequest) + { + readBuffer.Dispose(); + writeBuffer.Dispose(); + await stream.DisposeAsync(); + return default; + } + + writeBuffer.WriteByte((byte)'N'); + await writeBuffer.Flush(async: true); + + await readBuffer.EnsureAsync(4); + len = readBuffer.ReadInt32(); + await readBuffer.EnsureAsync(len - 4); + request = readBuffer.ReadInt32(); + } + if (request == SslRequest) { writeBuffer.WriteByte((byte)'N'); @@ -210,6 +237,8 @@ internal async ValueTask WaitForCancellationRequest() return serverOrCancellationRequest.CancellationRequest; } + internal async ValueTask SkipNextConnection() => await _pendingRequestsReader.ReadAsync(); + public async ValueTask DisposeAsync() { var endpoint = _socket.LocalEndPoint as IPEndPoint; diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index 639124be5c..9f7a799649 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -7,15 +7,18 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.TypeMapping; -using Npgsql.Util; +using Npgsql.Internal.Postgres; using NUnit.Framework; namespace Npgsql.Tests.Support; class PgServerMock : IDisposable { - static readonly Encoding Encoding = PGUtil.UTF8Encoding; + static 
uint BoolOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bool).Value; + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + static uint TextOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Text).Value; + + static readonly Encoding Encoding = NpgsqlWriteBuffer.UTF8Encoding; readonly NetworkStream _stream; readonly NpgsqlReadBuffer _readBuffer; @@ -38,6 +41,7 @@ internal PgServerMock( _stream = stream; _readBuffer = readBuffer; _writeBuffer = writeBuffer; + writeBuffer.MessageLengthValidation = false; } internal async Task Startup(MockState state) @@ -90,12 +94,12 @@ internal Task SendMockState(MockState state) return WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bool)) + .WriteRowDescription(new FieldDescription(BoolOid)) .WriteDataRow(BitConverter.GetBytes(isStandby)) .WriteCommandComplete() .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteRowDescription(new FieldDescription(TextOid)) .WriteDataRow(Encoding.ASCII.GetBytes(transactionReadOnly)) .WriteCommandComplete() .WriteReadyForQuery() @@ -148,7 +152,7 @@ internal async Task ExpectSimpleQuery(string expectedSql) Assert.That(actualSql, Is.EqualTo(expectedSql)); } - internal Task WaitForData() => _readBuffer.EnsureAsync(1); + internal Task WaitForData() => _readBuffer.EnsureAsync(1).AsTask(); internal Task FlushAsync() { @@ -159,7 +163,7 @@ internal Task FlushAsync() internal Task WriteScalarResponseAndFlush(int value) => WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(value))) .WriteCommandComplete() .WriteReadyForQuery() @@ -168,7 +172,7 @@ internal Task WriteScalarResponseAndFlush(int value) 
internal Task WriteScalarResponseAndFlush(bool value) => WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bool)) + .WriteRowDescription(new FieldDescription(BoolOid)) .WriteDataRow(BitConverter.GetBytes(value)) .WriteCommandComplete() .WriteReadyForQuery() @@ -177,7 +181,7 @@ internal Task WriteScalarResponseAndFlush(bool value) internal Task WriteScalarResponseAndFlush(string value) => WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteRowDescription(new FieldDescription(TextOid)) .WriteDataRow(Encoding.ASCII.GetBytes(value)) .WriteCommandComplete() .WriteReadyForQuery() @@ -209,7 +213,7 @@ internal PgServerMock WriteRowDescription(params FieldDescription[] fields) _writeBuffer.WriteByte((byte)BackendMessageCode.RowDescription); _writeBuffer.WriteInt32(4 + 2 + fields.Sum(f => Encoding.GetByteCount(f.Name) + 1 + 18)); - _writeBuffer.WriteInt16(fields.Length); + _writeBuffer.WriteInt16((short)fields.Length); foreach (var field in fields) { @@ -219,12 +223,26 @@ internal PgServerMock WriteRowDescription(params FieldDescription[] fields) _writeBuffer.WriteUInt32(field.TypeOID); _writeBuffer.WriteInt16(field.TypeSize); _writeBuffer.WriteInt32(field.TypeModifier); - _writeBuffer.WriteInt16((short)field.FormatCode); + _writeBuffer.WriteInt16(field.DataFormat.ToFormatCode()); } return this; } + internal PgServerMock WriteParameterDescription(params FieldDescription[] fields) + { + CheckDisposed(); + + _writeBuffer.WriteByte((byte)BackendMessageCode.ParameterDescription); + _writeBuffer.WriteInt32(1 + 4 + 2 + fields.Length * 4); + _writeBuffer.WriteUInt16((ushort)fields.Length); + + foreach (var field in fields) + _writeBuffer.WriteUInt32(field.TypeOID); + + return this; + } + internal PgServerMock WriteNoData() { CheckDisposed(); @@ -233,13 +251,21 @@ internal PgServerMock WriteNoData() return this; } + internal PgServerMock WriteEmptyQueryResponse() + 
{ + CheckDisposed(); + _writeBuffer.WriteByte((byte)BackendMessageCode.EmptyQueryResponse); + _writeBuffer.WriteInt32(4); + return this; + } + internal PgServerMock WriteDataRow(params byte[][] columnValues) { CheckDisposed(); _writeBuffer.WriteByte((byte)BackendMessageCode.DataRow); _writeBuffer.WriteInt32(4 + 2 + columnValues.Sum(v => 4 + v.Length)); - _writeBuffer.WriteInt16(columnValues.Length); + _writeBuffer.WriteInt16((short)columnValues.Length); foreach (var field in columnValues) { @@ -259,7 +285,7 @@ internal async Task WriteDataRowWithFlush(params byte[][] columnValues) _writeBuffer.WriteByte((byte)BackendMessageCode.DataRow); _writeBuffer.WriteInt32(4 + 2 + columnValues.Sum(v => 4 + v.Length)); - _writeBuffer.WriteInt16(columnValues.Length); + _writeBuffer.WriteInt16((short)columnValues.Length); foreach (var field in columnValues) { @@ -328,12 +354,12 @@ internal PgServerMock WriteBackendKeyData(int processId, int secret) internal PgServerMock WriteCancellationResponse() => WriteErrorResponse(PostgresErrorCodes.QueryCanceled, "Cancellation", "Query cancelled"); - internal PgServerMock WriteCopyInResponse() + internal PgServerMock WriteCopyInResponse(bool isBinary = false) { CheckDisposed(); _writeBuffer.WriteByte((byte)BackendMessageCode.CopyInResponse); _writeBuffer.WriteInt32(5); - _writeBuffer.WriteByte(0); + _writeBuffer.WriteByte(isBinary ? 
(byte)1 : (byte)0); _writeBuffer.WriteInt16(1); _writeBuffer.WriteInt16(0); return this; @@ -381,4 +407,4 @@ public void Dispose() _disposed = true; } -} \ No newline at end of file +} diff --git a/src/Npgsql/SingleThreadSynchronizationContext.cs b/test/Npgsql.Tests/Support/SingleThreadSynchronizationContext.cs similarity index 97% rename from src/Npgsql/SingleThreadSynchronizationContext.cs rename to test/Npgsql.Tests/Support/SingleThreadSynchronizationContext.cs index 99318be975..a7fedad3d6 100644 --- a/src/Npgsql/SingleThreadSynchronizationContext.cs +++ b/test/Npgsql.Tests/Support/SingleThreadSynchronizationContext.cs @@ -1,10 +1,9 @@ -using System; +using System; using System.Collections.Concurrent; using System.Diagnostics; using System.Threading; -using Microsoft.Extensions.Logging; -namespace Npgsql; +namespace Npgsql.Tests.Support; sealed class SingleThreadSynchronizationContext : SynchronizationContext, IDisposable { @@ -118,4 +117,4 @@ internal Disposable(SynchronizationContext synchronizationContext) public void Dispose() => SetSynchronizationContext(_synchronizationContext); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 0bea07a7b8..b8c531139f 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Data; using System.Diagnostics; @@ -6,9 +7,11 @@ using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Npgsql.Internal.Postgres; using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; +using NUnit.Framework.Constraints; namespace Npgsql.Tests; @@ -20,321 +23,688 @@ public abstract class TestBase /// public virtual string ConnectionString => TestUtil.ConnectionString; - static SemaphoreSlim DatabaseCreationLock = new(1); + static readonly SemaphoreSlim 
DatabaseCreationLock = new(1); + + static readonly object dataSourceLockObject = new(); + + static ConcurrentDictionary DataSources = new(StringComparer.Ordinal); #region Type testing - public async Task AssertType( + public Task AssertType( T value, string sqlLiteral, - string pgTypeName, - NpgsqlDbType? npgsqlDbType, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefaultForReading = true, - bool isDefaultForWriting = true, - bool? isDefault = null, - bool isNpgsqlDbTypeInferredFromClrType = true, - Func? comparer = null) - { - await using var connection = await OpenConnectionAsync(); - return await AssertType( - connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, isDefaultForWriting, - isDefault, isNpgsqlDbTypeInferredFromClrType, comparer); - } - - public async Task AssertType( + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); + + public Task AssertType( NpgsqlDataSource dataSource, T value, string sqlLiteral, - string pgTypeName, - NpgsqlDbType? npgsqlDbType, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefaultForReading = true, - bool isDefaultForWriting = true, - bool? isDefault = null, - bool isNpgsqlDbTypeInferredFromClrType = true, - Func? comparer = null) - { - await using var connection = await dataSource.OpenConnectionAsync(); - - return await AssertType(connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, - isDefaultForWriting, isDefault, isNpgsqlDbTypeInferredFromClrType); - } - - public async Task AssertType( + string dataTypeName, + DataTypeInference? 
dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(dataSource.OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); + + public Task AssertType( NpgsqlConnection connection, T value, string sqlLiteral, - string pgTypeName, - NpgsqlDbType? npgsqlDbType, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefaultForReading = true, - bool isDefaultForWriting = true, - bool? isDefault = null, - bool isNpgsqlDbTypeInferredFromClrType = true, - Func? comparer = null) + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(new(connection), disposeConnection: false, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); + + public Task AssertType( + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); + + public Task AssertType( + NpgsqlDataSource dataSource, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? 
comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(dataSource.OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); + + public Task AssertType( + NpgsqlConnection connection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(new(connection), disposeConnection: false, valueFactory, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); + + static async Task AssertTypeCore( + ValueTask connectionTask, + bool disposeConnection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) { - if (isDefault is not null) - isDefaultForReading = isDefaultForWriting = isDefault.Value; + var connection = await connectionTask; + await using var _ = disposeConnection ? 
connection : null; - await AssertTypeWrite(connection, () => value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForWriting, isNpgsqlDbTypeInferredFromClrType); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, value, isDefaultForReading, comparer); + await AssertTypeWriteCore(new(connection), disposeConnection: false, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); + return await AssertTypeReadCore(new(connection), disposeConnection: false, sqlLiteral, dataTypeName, valueFactory(), + valueTypeEqualsFieldType, comparer, skipArrayCheck); } - public async Task AssertTypeRead(string sqlLiteral, string pgTypeName, T expected, bool isDefault = true) - { - await using var connection = await OpenConnectionAsync(); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault); - } + public Task AssertTypeWrite( + T value, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); public Task AssertTypeWrite( + NpgsqlDataSource dataSource, T value, - string expectedSqlLiteral, - string pgTypeName, - NpgsqlDbType npgsqlDbType, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) - => AssertTypeWrite(() => value, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType); - - public async Task AssertTypeWrite( - Func valueFactory, - string expectedSqlLiteral, - string pgTypeName, - NpgsqlDbType npgsqlDbType, - DbType? dbType = null, - DbType? 
inferredDbType = null, - bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) - { - await using var connection = await OpenConnectionAsync(); - await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, isNpgsqlDbTypeInferredFromClrType); - } + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(dataSource.OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - internal static async Task AssertTypeRead( + public Task AssertTypeWrite( NpgsqlConnection connection, + T value, string sqlLiteral, - string pgTypeName, - T expected, - bool isDefault = true, - Func? comparer = null) - { - if (sqlLiteral.Contains('\'')) - sqlLiteral = sqlLiteral.Replace("'", "''"); + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(new(connection), disposeConnection: false, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, skipArrayCheck); - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", connection); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); - await reader.ReadAsync(); + public Task AssertTypeWrite( + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - var truncatedSqlLiteral = sqlLiteral.Length > 40 ? sqlLiteral[..40] + "..." 
: sqlLiteral; + public Task AssertTypeWrite( + NpgsqlDataSource dataSource, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(dataSource.OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - var dataTypeName = reader.GetDataTypeName(0); - var dotIndex = dataTypeName.IndexOf('.'); - if (dotIndex > -1 && dataTypeName.Substring(0, dotIndex) is "pg_catalog" or "public") - dataTypeName = dataTypeName.Substring(dotIndex + 1); + public Task AssertTypeWrite( + NpgsqlConnection connection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(new(connection), disposeConnection: false, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); + + static async Task AssertTypeWriteCore( + ValueTask connectionTask, + bool disposeConnection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference, + DbTypes? dbType, + bool skipArrayCheck) + { + var connection = await connectionTask; + await using var _ = disposeConnection ? connection : null; - Assert.That(dataTypeName, Is.EqualTo(pgTypeName), - $"Got wrong result from GetDataTypeName when reading '{truncatedSqlLiteral}'"); + await AssertTypeWriteCore( + connection, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference ?? DataTypeInference.Match, + dbType); - if (isDefault) + // Check the corresponding array type as well + if (!skipArrayCheck && !dataTypeName.EndsWith("[]", StringComparison.Ordinal)) { - // For arrays, GetFieldType always returns typeof(Array), since PG arrays can have arbitrary dimensionality - Assert.That(reader.GetFieldType(0), Is.EqualTo(dataTypeName.EndsWith("[]") ? 
typeof(Array) : typeof(T)), - $"Got wrong result from GetFieldType when reading '{truncatedSqlLiteral}'"); + await AssertTypeWriteCore( + connection, + () => new[] { valueFactory(), valueFactory() }, + ArrayLiteral(sqlLiteral), + dataTypeName + "[]", dataTypeInference ?? DataTypeInference.Match, + expectedDbTypes: null); } + } + + public enum DataTypeInference + { + /// + /// Data type is inferred from the CLR value and matches the data type under test. + /// + Match, + + /// + /// Data type is inferred from the CLR value but differs from the data type under test. + /// + /// + /// Used when we get some inferred data type (e.g. CLR strings are inferred to be 'text') but this does not match the data type (e.g. 'json') under test. + /// + Mismatch, + + /// + /// Data type can not be inferred from the CLR value. + /// + /// + /// This is for CLR types that are statically unknown to Npgsql (plugin types: NodaTime/NTS, composite types, enums...), + /// or where we specifically don't want to infer a data type because there's no good option + /// (e.g. uint can be mapped to 'oid/xid/cid', but we don't want any of these as a default/inferred data type) + /// + Nothing, + } + + public readonly struct DbTypes(DbType dataTypeMappedDbType, DbType valueInferredDbType, DbType dbTypeToSet) + { + public DbType DataTypeMappedDbType { get; } = dataTypeMappedDbType; + public DbType ValueInferredDbType { get; } = valueInferredDbType; - var actual = isDefault ? (T)reader.GetValue(0) : reader.GetFieldValue(0); + // The DbType to explicitly set on the parameter. Usually same as ValueInferredDbType, + // It differs when testing DbType aliases (e.g. VarNumeric → DbType.Decimal) as we want to test those also work correctly. + public DbType DbTypeToSet { get; } = dbTypeToSet; - Assert.That(actual, comparer is null ? 
Is.EqualTo(expected) : Is.EqualTo(expected).Using(new SimpleComparer(comparer)), - $"Got wrong result from GetFieldValue value when reading '{truncatedSqlLiteral}'"); + public DbTypes(DbType dataTypeMappedDbType, DbType valueInferredDbType) + : this(dataTypeMappedDbType, valueInferredDbType, valueInferredDbType) {} - return actual; + public static implicit operator DbTypes(DbType dbType) => new(dbType, dbType, dbType); } - internal static async Task AssertTypeWrite( + static async Task AssertTypeWriteCore( NpgsqlConnection connection, Func valueFactory, - string expectedSqlLiteral, - string pgTypeName, - NpgsqlDbType? npgsqlDbType, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) + string sqlLiteral, + string dataTypeName, + DataTypeInference dataTypeInference, + DbTypes? expectedDbTypes) { - if (npgsqlDbType is null) - isNpgsqlDbTypeInferredFromClrType = false; - - // TODO: Interferes with both multiplexing and connection-specific mapping (used e.g. in NodaTime) - // Reset the type mapper to make sure we're resolving this type with a clean slate (for isolation, just in case) - // connection.TypeMapper.Reset(); + var npgsqlDbType = DataTypeName.FromDisplayName(dataTypeName).ToNpgsqlDbType(); // Strip any facet information (length/precision/scale) - var parenIndex = pgTypeName.IndexOf('('); - var pgTypeNameWithoutFacets = parenIndex > -1 ? pgTypeName[..parenIndex] : pgTypeName; + var parenIndex = dataTypeName.IndexOf('('); + var dataTypeNameWithoutFacets = parenIndex > -1 + ? dataTypeName[..parenIndex] + dataTypeName[(dataTypeName.IndexOf(')') + 1)..] 
+ : dataTypeName; + + // For composite type with dots in name, Postgresql returns name with quotes - scheme."My.type.name" + // but for npgsql mapping we should use names without quotes - scheme.My.type.name + var dataTypeNameWithoutFacetsAndQuotes = dataTypeNameWithoutFacets.Replace("\"", string.Empty); // We test the following scenarios (between 2 and 5 in total): - // 1. With NpgsqlDbType explicitly set - // 2. With DataTypeName explicitly set - // 3. With DbType explicitly set (if one was provided) - // 4. With only the value set (if it's the default) - // 5. With only the value set, using generic NpgsqlParameter (if it's the default) + // 1. With value and DataTypeName set + // 2. With value and NpgsqlDbType set (when available) + // 3. With value and DbType explicitly set + // 4. With only the value set + // 5. With only the value set, using generic NpgsqlParameter + + // We only actually attempt to write to the database with a set DataTypeName, NpgsqlDbType, or when data type inference is exact. 
var errorIdentifierIndex = -1; var errorIdentifier = new Dictionary(); await using var cmd = new NpgsqlCommand { Connection = connection }; NpgsqlParameter p; - // With NpgsqlDbType - if (npgsqlDbType is not null) - { - p = new NpgsqlParameter { Value = valueFactory(), NpgsqlDbType = npgsqlDbType.Value }; - cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"NpgsqlDbType={npgsqlDbType}"; - CheckInference(); - } // With data type name - p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = pgTypeNameWithoutFacets }; + p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = dataTypeNameWithoutFacetsAndQuotes }; + errorIdentifier[++errorIdentifierIndex] = $"Value and DataTypeName={dataTypeNameWithoutFacetsAndQuotes}"; + DataTypeAsserts(); cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"DataTypeName={pgTypeNameWithoutFacets}"; - CheckInference(); - // With DbType - if (dbType is not null) + // With NpgsqlDbType + if (npgsqlDbType is not null) { - p = new NpgsqlParameter { Value = valueFactory(), DbType = dbType.Value }; + p = new NpgsqlParameter { Value = valueFactory(), NpgsqlDbType = npgsqlDbType.Value }; + errorIdentifier[++errorIdentifierIndex] = $"Value and NpgsqlDbType={npgsqlDbType}"; + DataTypeAsserts(); cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"DbType={dbType}"; - CheckInference(); } - if (isDefault) - { - // With (non-generic) value only - p = new NpgsqlParameter { Value = valueFactory() }; + // With DbType, if none was supplied we verify it's DbType.Object. 
+ p = new NpgsqlParameter { Value = valueFactory() }; + errorIdentifier[++errorIdentifierIndex] = $"Value and DbType={expectedDbTypes?.DbTypeToSet}"; + if (expectedDbTypes?.DbTypeToSet is { } expectedDbType) + p.DbType = expectedDbType; + DbTypeAsserts(); + if (dataTypeInference is DataTypeInference.Match) cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = "Value only (non-generic)"; - if (isNpgsqlDbTypeInferredFromClrType) - CheckInference(); - // With (generic) value only - p = new NpgsqlParameter { TypedValue = valueFactory() }; + // With (non-generic) value only + p = new NpgsqlParameter { Value = valueFactory() }; + errorIdentifier[++errorIdentifierIndex] = $"Value (type {p.Value!.GetType().Name}, non-generic)"; + ValueAsserts(); + if (dataTypeInference is DataTypeInference.Match) cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = "Value only (generic)"; - if (isNpgsqlDbTypeInferredFromClrType) - CheckInference(); - } - Debug.Assert(cmd.Parameters.Count == errorIdentifierIndex + 1); + // With (generic) value only + p = new NpgsqlParameter { TypedValue = valueFactory() }; + errorIdentifier[++errorIdentifierIndex] = $"Value (type {p.Value!.GetType().Name}, generic)"; + ValueAsserts(); + if (dataTypeInference is DataTypeInference.Match) + cmd.Parameters.Add(p); cmd.CommandText = "SELECT " + string.Join(", ", Enumerable.Range(1, cmd.Parameters.Count).Select(i => "pg_typeof($1)::text, $1::text".Replace("$1", $"${i}"))); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); - await reader.ReadAsync(); + // Async execution: tests async write paths in converters + { + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await reader.ReadAsync(); + AssertWriteResults(reader); + } - for (var i = 0; i < cmd.Parameters.Count * 2; i += 2) + // Sync execution: tests sync write paths in converters. + // Reset parameter values first so that one-shot values (e.g. 
streams) can be re-read from the start. + for (var i = 0; i < cmd.Parameters.Count; i++) + cmd.Parameters[i].Value = valueFactory(); { - Assert.That(reader[i], Is.EqualTo(pgTypeNameWithoutFacets), $"Got wrong PG type name when writing with {errorIdentifier[i / 2]}"); - Assert.That(reader[i+1], Is.EqualTo(expectedSqlLiteral), $"Got wrong SQL literal when writing with {errorIdentifier[i / 2]}"); + using var reader = cmd.ExecuteReader(CommandBehavior.SequentialAccess); + reader.Read(); + AssertWriteResults(reader); } - void CheckInference() + void AssertWriteResults(NpgsqlDataReader reader) { - if (npgsqlDbType is not null) + for (var i = 0; i < cmd.Parameters.Count * 2; i += 2) { - Assert.That(p.NpgsqlDbType, Is.EqualTo(npgsqlDbType), - () => $"Got wrong inferred NpgsqlDbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); + var error = errorIdentifier[i / 2]; + Assert.That(reader[i], Is.EqualTo(dataTypeNameWithoutFacets), $"Got wrong data type name when writing with {error}"); + Assert.That(reader[i + 1], Is.EqualTo(sqlLiteral), $"Got wrong SQL literal when writing with {error}"); } + } + + void DataTypeAsserts() + { + var expectedDataTypeName = dataTypeNameWithoutFacetsAndQuotes; + var expectedNpgsqlDbType = npgsqlDbType ?? NpgsqlDbType.Unknown; + + var expectedDbType = expectedDbTypes?.DataTypeMappedDbType ?? DbType.Object; - Assert.That(p.DbType, Is.EqualTo(inferredDbType ?? dbType ?? DbType.Object), - () => $"Got wrong inferred DbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); + AssertParameterProperties(expectedDataTypeName, expectedNpgsqlDbType, expectedDbType); + } - Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacets), - () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); + void DbTypeAsserts() + { + // If DbType was set it overrules any value based data type inference. 
+ // As DbType.Object never has any mapping either we check for null/Unknown when DbType.Object was set. + var (expectedDataTypeName, expectedNpgsqlDbType) = + expectedDbTypes is { DbTypeToSet: DbType.Object } + ? (null, NpgsqlDbType.Unknown) + : GetInferredDataType(); + + var expectedDbType = expectedDbTypes?.DbTypeToSet ?? DbType.Object; + + AssertParameterProperties(expectedDataTypeName, expectedNpgsqlDbType, expectedDbType); + } + + void ValueAsserts() + { + var (expectedDataTypeName, expectedNpgsqlDbType) = GetInferredDataType(); + + var expectedDbType = expectedDbTypes?.ValueInferredDbType ?? DbType.Object; + + AssertParameterProperties(expectedDataTypeName, expectedNpgsqlDbType, expectedDbType); + } + + void AssertParameterProperties(string? expectedDataTypeName, NpgsqlDbType expectedNpgsqlDbType, DbType expectedDbType) + { + Assert.That(p.DataTypeName, Is.EqualTo(expectedDataTypeName), + $"Got wrong DataTypeName when checking with {errorIdentifier[errorIdentifierIndex]}"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(expectedNpgsqlDbType), + $"Got wrong NpgsqlDbType when checking with {errorIdentifier[errorIdentifierIndex]}"); + Assert.That(p.DbType, Is.EqualTo(expectedDbType), + $"Got wrong DbType when checking with {errorIdentifier[errorIdentifierIndex]}"); } + + (string? ExpectedDataTypeName, NpgsqlDbType ExpectedNpgsqlDbType) GetInferredDataType() + => dataTypeInference switch + { + DataTypeInference.Match => + (dataTypeNameWithoutFacetsAndQuotes, npgsqlDbType ?? NpgsqlDbType.Unknown), + DataTypeInference.Mismatch => + // Only respect Mismatch if the type is well known (for now that means it has an NpgsqlDbType). + // Otherwise use the exact values so we'll error with the right details. + p.NpgsqlDbType is not NpgsqlDbType.Unknown + ? (p.DataTypeName, p.NpgsqlDbType) + : (dataTypeNameWithoutFacetsAndQuotes, npgsqlDbType ?? 
NpgsqlDbType.Unknown), + DataTypeInference.Nothing => + (null, NpgsqlDbType.Unknown), + _ => throw new UnreachableException($"Unknown case {dataTypeInference}") + }; } - public async Task AssertTypeUnsupported(T value, string sqlLiteral, string pgTypeName) + public Task AssertTypeRead(string sqlLiteral, string dataTypeName, T value, + bool valueTypeEqualsFieldType = true, Func? comparer = null, bool skipArrayCheck = false) + => AssertTypeReadCore(OpenConnectionAsync(), disposeConnection: true, sqlLiteral, dataTypeName, + value, valueTypeEqualsFieldType, comparer, skipArrayCheck); + + public Task AssertTypeRead(NpgsqlDataSource dataSource, string sqlLiteral, string dataTypeName, T value, + bool valueTypeEqualsFieldType = true, Func? comparer = null, bool skipArrayCheck = false) + => AssertTypeReadCore(dataSource.OpenConnectionAsync(), disposeConnection: true, sqlLiteral, dataTypeName, + value, valueTypeEqualsFieldType, comparer, skipArrayCheck); + + public Task AssertTypeRead(NpgsqlConnection connection, string sqlLiteral, string dataTypeName, T value, + bool valueTypeEqualsFieldType = true, Func? comparer = null, bool skipArrayCheck = false) + => AssertTypeReadCore(new(connection), disposeConnection: false, sqlLiteral, dataTypeName, + value, valueTypeEqualsFieldType, comparer, skipArrayCheck); + + static async Task AssertTypeReadCore( + ValueTask connectionTask, + bool disposeConnection, + string sqlLiteral, + string dataTypeName, + T value, + bool valueTypeEqualsFieldType, + Func? comparer, + bool skipArrayCheck) { - await AssertTypeUnsupportedRead(sqlLiteral, pgTypeName); - await AssertTypeUnsupportedWrite(value, pgTypeName); + var connection = await connectionTask; + await using var _ = disposeConnection ? 
connection : null; + + var result = await AssertTypeReadCore(connection, sqlLiteral, dataTypeName, value, valueTypeEqualsFieldType, comparer); + + // Check the corresponding array type as well + if (!skipArrayCheck && !dataTypeName.EndsWith("[]", StringComparison.Ordinal)) + { + await AssertTypeReadCore( + connection, + ArrayLiteral(sqlLiteral), + dataTypeName + "[]", + new[] { value, value }, + valueTypeEqualsFieldType, + comparer is null ? null : (array1, array2) => array1.SequenceEqual(array2, CreateEqualityComparer(comparer!))); + } + return result; } - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName) + static async Task AssertTypeReadCore( + NpgsqlConnection connection, + string sqlLiteral, + string dataTypeName, + T value, + bool valueTypeEqualsFieldType, + Func? comparer) { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); + if (sqlLiteral.Contains('\'')) + sqlLiteral = sqlLiteral.Replace("'", "''"); + + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{dataTypeName}", connection); + + // Async execution: tests async and sync column reads within a single buffered query. await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); + var truncatedSqlLiteral = sqlLiteral.Length > 40 ? sqlLiteral[..40] + "..." 
: sqlLiteral; + + var actualDataTypeName = reader.GetDataTypeName(0); + var dotIndex = actualDataTypeName.IndexOf('.'); + if (dotIndex > -1 && actualDataTypeName.Substring(0, dotIndex) is "pg_catalog" or "public") + actualDataTypeName = actualDataTypeName.Substring(dotIndex + 1); + + // For composite type with dots, postgres works only with quoted name - scheme."My.type.name" + // but npgsql converts it to name without quotes + var dataTypeNameWithoutQuotes = dataTypeName.Replace("\"", string.Empty); + Assert.That(actualDataTypeName, Is.EqualTo(dataTypeNameWithoutQuotes), + $"Got wrong result from GetDataTypeName when reading '{truncatedSqlLiteral}'"); + + // For arrays, GetFieldType always returns typeof(Array), since PG arrays can have arbitrary dimensionality. + var isArrayTest = actualDataTypeName.EndsWith("[]", StringComparison.Ordinal) && typeof(T).IsArray; + Assert.That(reader.GetFieldType(0), + (valueTypeEqualsFieldType || isArrayTest ? new ConstraintExpression() : Is.Not) + .EqualTo(isArrayTest ? typeof(Array) : typeof(T)), + $"Got wrong result from GetFieldType when reading '{truncatedSqlLiteral}'"); + + T actual; + if (valueTypeEqualsFieldType) + { + // Set IsRowBuffered=false before the first read so _column is still -1, ensuring GetFieldValueAsync + // goes through the real async converter path (converter.ReadAsObjectAsync) rather than the sync shortcut. + reader.IsRowBuffered = false; + actual = (T)await reader.GetFieldValueAsync(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer)), + $"Got wrong result from GetFieldValueAsync() value when reading '{truncatedSqlLiteral}'"); + + // Restore IsRowBuffered so subsequent sync reads use the normal buffered code path. + reader.IsRowBuffered = true; + actual = (T)reader.GetValue(0); + Assert.That(actual, comparer is null ? 
Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), + $"Got wrong result from GetValue() value when reading '{truncatedSqlLiteral}'"); + + actual = (T)reader.GetFieldValue(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer)), + $"Got wrong result from GetFieldValue() value when reading '{truncatedSqlLiteral}'"); + + return actual; + } + + // See comment above about IsRowBuffered. + reader.IsRowBuffered = false; + actual = await reader.GetFieldValueAsync(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), + $"Got wrong result from GetFieldValueAsync() value when reading '{truncatedSqlLiteral}'"); + + reader.IsRowBuffered = true; + actual = reader.GetFieldValue(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), + $"Got wrong result from GetFieldValue() value when reading '{truncatedSqlLiteral}'"); + + return actual; + } + + static EqualityComparer CreateEqualityComparer(Func comparer) + => EqualityComparer.Create((x, y) => + { + if (x is null && y is null) + return true; + if (x is null || y is null) + return false; + return comparer(x, y); + }); + + public async Task AssertTypeUnsupported(T value, string sqlLiteral, string dataTypeName, NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) + { + await AssertTypeUnsupportedRead(sqlLiteral, dataTypeName, dataSource, skipArrayCheck); + await AssertTypeUnsupportedWrite(value, dataTypeName, dataSource, skipArrayCheck); } - public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName) - => AssertTypeUnsupportedRead(sqlLiteral, pgTypeName); + public Task AssertTypeUnsupportedRead(string sqlLiteral, string dataTypeName, + NpgsqlDataSource? 
dataSource = null, bool skipArrayCheck = false) + => AssertTypeUnsupportedRead(sqlLiteral, dataTypeName, dataSource, skipArrayCheck); - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName) + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string dataTypeName, + NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) where TException : Exception { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); + var result = await AssertTypeUnsupportedReadCore(sqlLiteral, dataTypeName, dataSource); + + // Check the corresponding array type as well + if (!skipArrayCheck && !dataTypeName.EndsWith("[]", StringComparison.Ordinal)) + { + await AssertTypeUnsupportedReadCore(ArrayLiteral(sqlLiteral), dataTypeName + "[]", dataSource); + } + + return result; + } + + async Task AssertTypeUnsupportedReadCore(string sqlLiteral, string dataTypeName, NpgsqlDataSource? dataSource = null) + where TException : Exception + { + dataSource ??= DataSource; + + await using var conn = await dataSource.OpenConnectionAsync(); + await using var tx = await conn.BeginTransactionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{dataTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - return Assert.Throws(() => reader.GetFieldValue(0))!; + return Assert.Throws(() => + { + _ = typeof(T) == typeof(object) ? reader.GetValue(0) : reader.GetFieldValue(0); + })!; } - public Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null) - => AssertTypeUnsupportedWrite(value, pgTypeName); + public Task AssertTypeUnsupportedWrite(T value, string? dataTypeName = null, NpgsqlDataSource? dataSource = null, + bool skipArrayCheck = false) + => AssertTypeUnsupportedWrite(value, dataTypeName, dataSource, skipArrayCheck); + + public async Task AssertTypeUnsupportedWrite(T value, string? 
dataTypeName = null, + NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) + where TException : Exception + { + var result = await AssertTypeUnsupportedWriteCore(value, dataTypeName, dataSource); + + // Check the corresponding array type as well + if (!skipArrayCheck && !dataTypeName?.EndsWith("[]", StringComparison.Ordinal) == true) + { + await AssertTypeUnsupportedWriteCore([value, value], dataTypeName + "[]", dataSource); + } + + return result; + } - public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null) + async Task AssertTypeUnsupportedWriteCore(T value, string? dataTypeName = null, NpgsqlDataSource? dataSource = null) where TException : Exception { - await using var conn = await OpenConnectionAsync(); + dataSource ??= DataSource; + + await using var conn = await dataSource.OpenConnectionAsync(); + await using var tx = await conn.BeginTransactionAsync(); await using var cmd = new NpgsqlCommand("SELECT $1", conn) { Parameters = { new() { Value = value } } }; - if (pgTypeName is not null) - cmd.Parameters[0].DataTypeName = pgTypeName; + if (dataTypeName is not null) + cmd.Parameters[0].DataTypeName = dataTypeName; return Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync())!; } - class SimpleComparer : IEqualityComparer + // For array quoting rules, see array_out in https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/arrayfuncs.c + static string ArrayLiteral(string elementLiteral) { - readonly Func _comparerDelegate; - - public SimpleComparer(Func comparerDelegate) - => _comparerDelegate = comparerDelegate; + switch (elementLiteral) + { + case "": + elementLiteral = "\"\""; + break; + case "NULL": + elementLiteral = "\"NULL\""; + break; + default: + // Escape quotes and backslashes, quote for special chars + elementLiteral = elementLiteral.Replace("\\", "\\\\").Replace("\"", "\\\""); + if (elementLiteral.Any(c => c is '{' or '}' or ',' or '"' or '\\' || char.IsWhiteSpace(c))) + { + elementLiteral = '"' + 
elementLiteral + '"'; + } - public bool Equals(T? x, T? y) - => x is null - ? y is null - : y is not null && _comparerDelegate(x, y); + break; + } - public int GetHashCode(T obj) => throw new NotSupportedException(); + return $"{{{elementLiteral},{elementLiteral}}}"; } #endregion Type testing #region Utilities for use by tests - protected static readonly NpgsqlDataSource SharedDataSource = NpgsqlDataSource.Create(TestUtil.ConnectionString); - protected virtual NpgsqlDataSourceBuilder CreateDataSourceBuilder() - => new(TestUtil.ConnectionString); + => new(ConnectionString); protected virtual NpgsqlDataSource CreateDataSource() - => NpgsqlDataSource.Create(TestUtil.ConnectionString); + => CreateDataSource(ConnectionString); + + protected NpgsqlDataSource CreateDataSource(string connectionString) + => NpgsqlDataSource.Create(connectionString); + + protected NpgsqlDataSource CreateDataSource(Action connectionStringBuilderAction) + { + var connectionStringBuilder = new NpgsqlConnectionStringBuilder(ConnectionString); + connectionStringBuilderAction(connectionStringBuilder); + return NpgsqlDataSource.Create(connectionStringBuilder); + } + + protected NpgsqlDataSource CreateDataSource(Action configure) + { + var builder = new NpgsqlDataSourceBuilder(ConnectionString); + configure(builder); + return builder.Build(); + } + + protected static NpgsqlDataSource GetDataSource(string connectionString) + { + if (!DataSources.TryGetValue(connectionString, out var dataSource)) + { + lock (dataSourceLockObject) + { + if (!DataSources.TryGetValue(connectionString, out dataSource)) + { + var canonicalConnectionString = new NpgsqlConnectionStringBuilder(connectionString).ToString(); + if (!DataSources.TryGetValue(canonicalConnectionString, out dataSource)) + { + DataSources[canonicalConnectionString] = dataSource = NpgsqlDataSource.Create(connectionString); + } + DataSources[connectionString] = dataSource; + } + } + } + + return dataSource; + } protected virtual NpgsqlDataSource 
CreateLoggingDataSource( out ListLoggerProvider listLoggerProvider, @@ -355,55 +725,60 @@ protected virtual NpgsqlDataSource CreateLoggingDataSource( return builder.Build(); } - protected virtual NpgsqlConnection CreateConnection(string? connectionString = null) - => new(connectionString ?? ConnectionString); + protected NpgsqlDataSource DefaultDataSource + => GetDataSource(ConnectionString); - protected virtual NpgsqlConnection CreateConnection(Action builderAction) - { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - builderAction(builder); - return new NpgsqlConnection(builder.ConnectionString); - } + protected virtual NpgsqlDataSource DataSource => DefaultDataSource; - protected virtual NpgsqlConnection OpenConnection(string? connectionString = null) - => OpenConnection(connectionString, async: false).GetAwaiter().GetResult(); + protected virtual NpgsqlConnection CreateConnection() + => DataSource.CreateConnection(); - protected virtual NpgsqlConnection OpenConnection(Action builderAction) + protected virtual NpgsqlConnection OpenConnection() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - builderAction(builder); - return OpenConnection(builder.ConnectionString, async: false).GetAwaiter().GetResult(); + var connection = CreateConnection(); + try + { + OpenConnection(connection, async: false).GetAwaiter().GetResult(); + return connection; + } + catch + { + connection.Dispose(); + throw; + } } - protected virtual ValueTask OpenConnectionAsync(string? 
connectionString = null) - => OpenConnection(connectionString, async: true); - - protected virtual ValueTask OpenConnectionAsync( - Action builderAction) + protected virtual async ValueTask OpenConnectionAsync() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - builderAction(builder); - return OpenConnection(builder.ConnectionString, async: true); + var connection = CreateConnection(); + try + { + await OpenConnection(connection, async: true); + return connection; + } + catch + { + await connection.DisposeAsync(); + throw; + } } - ValueTask OpenConnection(string? connectionString, bool async) + static Task OpenConnection(NpgsqlConnection conn, bool async) { return OpenConnectionInternal(hasLock: false); - async ValueTask OpenConnectionInternal(bool hasLock) + async Task OpenConnectionInternal(bool hasLock) { - var conn = CreateConnection(connectionString); try { if (async) await conn.OpenAsync(); else conn.Open(); - return conn; } catch (PostgresException e) { - if (e.SqlState == PostgresErrorCodes.InvalidPassword && connectionString == TestUtil.DefaultConnectionString) + if (e.SqlState == PostgresErrorCodes.InvalidPassword) throw new Exception("Please create a user npgsql_tests as follows: CREATE USER npgsql_tests PASSWORD 'npgsql_tests' SUPERUSER"); if (e.SqlState == PostgresErrorCodes.InvalidCatalogName) @@ -413,7 +788,7 @@ async ValueTask OpenConnectionInternal(bool hasLock) DatabaseCreationLock.Wait(); try { - return await OpenConnectionInternal(hasLock: true); + await OpenConnectionInternal(hasLock: true); } finally { @@ -422,10 +797,9 @@ async ValueTask OpenConnectionInternal(bool hasLock) } // Database does not exist and we have the lock, proceed to creation - var builder = new NpgsqlConnectionStringBuilder(connectionString ?? 
ConnectionString) + var builder = new NpgsqlConnectionStringBuilder(TestUtil.ConnectionString) { Pooling = false, - Multiplexing = false, Database = "postgres" }; @@ -439,7 +813,7 @@ async ValueTask OpenConnectionInternal(bool hasLock) await conn.OpenAsync(); else conn.Open(); - return conn; + return; } throw; @@ -447,12 +821,6 @@ async ValueTask OpenConnectionInternal(bool hasLock) } } - protected NpgsqlConnection OpenConnection(NpgsqlConnectionStringBuilder csb) - => OpenConnection(csb.ToString()); - - protected virtual ValueTask OpenConnectionAsync(NpgsqlConnectionStringBuilder csb) - => OpenConnectionAsync(csb.ToString()); - // In PG under 9.1 you can't do SELECT pg_sleep(2) in binary because that function returns void and PG doesn't know // how to transfer that. So cast to text server-side. protected static NpgsqlCommand CreateSleepCommand(NpgsqlConnection conn, int seconds = 1000) diff --git a/test/Npgsql.Tests/SyncOrAsyncTestBase.cs b/test/Npgsql.Tests/SyncOrAsyncTestBase.cs index 0eff0c7488..2a676e97cb 100644 --- a/test/Npgsql.Tests/SyncOrAsyncTestBase.cs +++ b/test/Npgsql.Tests/SyncOrAsyncTestBase.cs @@ -4,13 +4,11 @@ namespace Npgsql.Tests; [TestFixture(SyncOrAsync.Sync)] [TestFixture(SyncOrAsync.Async)] -public abstract class SyncOrAsyncTestBase : TestBase +public abstract class SyncOrAsyncTestBase(SyncOrAsync syncOrAsync) : TestBase { protected bool IsAsync => SyncOrAsync == SyncOrAsync.Async; - protected SyncOrAsync SyncOrAsync { get; } - - protected SyncOrAsyncTestBase(SyncOrAsync syncOrAsync) => SyncOrAsync = syncOrAsync; + protected SyncOrAsync SyncOrAsync { get; } = syncOrAsync; } public enum SyncOrAsync diff --git a/test/Npgsql.Tests/SystemTransactionTests.cs b/test/Npgsql.Tests/SystemTransactionTests.cs index 27a9d057e1..2363fb170a 100644 --- a/test/Npgsql.Tests/SystemTransactionTests.cs +++ b/test/Npgsql.Tests/SystemTransactionTests.cs @@ -1,24 +1,26 @@ using System; using System.Data; +using System.Threading; using System.Transactions; using 
NUnit.Framework; +using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; // This test suite contains ambient transaction tests, except those involving distributed transactions which are only // supported on .NET Framework / Windows. Distributed transaction tests are in DistributedTransactionTests. -[NonParallelizable] public class SystemTransactionTests : TestBase { [Test, Description("Single connection enlisting explicitly, committing")] public void Explicit_enlist() { - using var conn = new NpgsqlConnection(ConnectionStringEnlistOff); - conn.Open(); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); using (var scope = new TransactionScope()) { conn.EnlistTransaction(Transaction.Current); - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); scope.Complete(); @@ -27,7 +29,7 @@ public void Explicit_enlist() AssertNoPreparedTransactions(); using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Unexpected data count"); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Unexpected data count"); tx.Rollback(); } } @@ -35,18 +37,20 @@ public void Explicit_enlist() [Test, Description("Single connection enlisting implicitly, committing")] public void Implicit_enlist() { - var conn = new NpgsqlConnection(ConnectionStringEnlistOn); + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.CreateConnection(); using (var scope = new TransactionScope()) { conn.Open(); - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES 
('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); scope.Complete(); } using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Unexpected data count"); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Unexpected data count"); tx.Rollback(); } } @@ -54,37 +58,41 @@ public void Implicit_enlist() [Test] public void Enlist_Off() { + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using (new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOff)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOff)) + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) { Assert.That(conn1.EnlistedTransaction, Is.Null); - Assert.That(conn1.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); - Assert.That(conn2.ExecuteScalar("SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Unexpected data count"); + Assert.That(conn1.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn2.ExecuteScalar($"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Unexpected data count"); } // Scope disposed and not completed => rollback, but no enlistment, so changes should still be there. 
- using (var conn3 = OpenConnection(ConnectionStringEnlistOff)) + using (var conn3 = dataSource.OpenConnection()) { - Assert.That(conn3.ExecuteScalar("SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Insert unexpectedly rollback-ed"); + Assert.That(conn3.ExecuteScalar($"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Insert unexpectedly rollback-ed"); } } [Test, Description("Single connection enlisting explicitly, rollback")] public void Rollback_explicit_enlist() { - using var conn = OpenConnection(); + using var dataSource = CreateDataSource(); + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); using (new TransactionScope()) { conn.EnlistTransaction(Transaction.Current); - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); // No commit } AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0), "Unexpected data count"); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(0), "Unexpected data count"); tx.Rollback(); } } @@ -93,36 +101,36 @@ public void Rollback_explicit_enlist() [IssueLink("https://github.com/npgsql/npgsql/issues/2408")] public void Rollback_implicit_enlist([Values(true, false)] bool pooling) { - var connectionString = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - Pooling = pooling - }.ToString(); + using var dataSource = CreateDataSource(csb => csb.Pooling = pooling); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (new TransactionScope()) - using (var conn = OpenConnection(connectionString)) + using (var conn = dataSource.OpenConnection()) { - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO 
data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); // No commit } - AssertNumberOfRows(0); + AssertNumberOfRows(0, tableName); } [Test] public void Two_consecutive_connections() { + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using (var scope = new TransactionScope()) { - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) { - Assert.That(conn1.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"), Is.EqualTo(1), "Unexpected first insert rowcount"); + Assert.That(conn1.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"), Is.EqualTo(1), "Unexpected first insert rowcount"); } - using (var conn2 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn2 = dataSource.OpenConnection()) { - Assert.That(conn2.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test2')"), Is.EqualTo(1), "Unexpected second insert rowcount"); + Assert.That(conn2.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test2')"), Is.EqualTo(1), "Unexpected second insert rowcount"); } // Consecutive connections used in same scope should not promote the transaction to distributed. 
@@ -130,65 +138,66 @@ public void Two_consecutive_connections() AssertNoPreparedTransactions(); scope.Complete(); } - AssertNumberOfRows(2); + AssertNumberOfRows(2, tableName); } [Test] public void Close_connection() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - ApplicationName = nameof(Close_connection), - }.ToString(); + // We assert the number of idle connections below + using var dataSource = CreateDataSource(csb => csb.Enlist = true); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (var scope = new TransactionScope()) - using (var conn = OpenConnection(connString)) + using (var conn = dataSource.OpenConnection()) { - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); conn.Close(); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); scope.Complete(); } - AssertNumberOfRows(1); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - Assert.That(pool!.Statistics.Idle, Is.EqualTo(1)); - - using (var conn = new NpgsqlConnection(connString)) - NpgsqlConnection.ClearPool(conn); + AssertNumberOfRows(1, tableName); + Assert.That(dataSource.Statistics.Idle, Is.EqualTo(1)); } [Test] public void Enlist_to_two_transactions() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); var ctx = new CommittableTransaction(); conn.EnlistTransaction(ctx); Assert.That(() => conn.EnlistTransaction(new CommittableTransaction()), Throws.Exception.TypeOf()); ctx.Rollback(); using var tx = conn.BeginTransaction(); - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0)); + Assert.That(conn.ExecuteScalar(@$"SELECT 
COUNT(*) FROM {tableName}"), Is.EqualTo(0)); tx.Rollback(); } [Test] public void Enlist_twice_to_same_transaction() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); var ctx = new CommittableTransaction(); conn.EnlistTransaction(ctx); conn.EnlistTransaction(ctx); ctx.Rollback(); using var tx = conn.BeginTransaction(); - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0)); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(0)); tx.Rollback(); } [Test] public void Scope_after_scope() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); using (new TransactionScope()) conn.EnlistTransaction(Transaction.Current); using (new TransactionScope()) @@ -196,7 +205,7 @@ public void Scope_after_scope() using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0)); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(0)); tx.Rollback(); } } @@ -204,49 +213,85 @@ public void Scope_after_scope() [Test] public void Reuse_connection() { + // We check the ProcessID below + using var dataSource = CreateDataSource(csb => csb.Enlist = true); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (var scope = new TransactionScope()) - using (var conn = new NpgsqlConnection(ConnectionStringEnlistOn)) + using (var conn = dataSource.CreateConnection()) { conn.Open(); var processId = conn.ProcessID; - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"); conn.Close(); conn.Open(); Assert.That(conn.ProcessID, Is.EqualTo(processId)); - 
conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test2')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test2')"); conn.Close(); scope.Complete(); } - AssertNumberOfRows(2); + AssertNumberOfRows(2, tableName); } [Test] public void Reuse_connection_rollback() { + // We check the ProcessID below + using var dataSource = CreateDataSource(csb => csb.Enlist = true); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (new TransactionScope()) - using (var conn = new NpgsqlConnection(ConnectionStringEnlistOn)) + using (var conn = dataSource.CreateConnection()) { conn.Open(); var processId = conn.ProcessID; - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"); conn.Close(); conn.Open(); Assert.That(conn.ProcessID, Is.EqualTo(processId)); - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test2')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test2')"); conn.Close(); // No commit } - AssertNumberOfRows(0); + AssertNumberOfRows(0, tableName); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3735")] + public void Reuse_connection_resets_temp_tables() + { + // When a connection is closed inside a TransactionScope and then reopened, + // temp tables should be discarded. 
+ using var dataSource = CreateDataSource(csb => csb.Enlist = true); + using (new TransactionScope()) + using (var conn = dataSource.CreateConnection()) + { + conn.Open(); + var processId = conn.ProcessID; + + // Create a temp table + conn.ExecuteNonQuery("CREATE TEMP TABLE temp_test (id INT)"); + + conn.Close(); + + // Reopen - should get the same physical connection but with reset state + conn.Open(); + Assert.That(conn.ProcessID, Is.EqualTo(processId), "Should reuse the same physical connection"); + + // The temp table should have been discarded + Assert.That(() => conn.ExecuteScalar("SELECT COUNT(*) FROM temp_test"), + Throws.Exception.TypeOf() + .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.UndefinedTable)); + } } [Test, Ignore("Timeout doesn't seem to fire on .NET Core / Linux")] public void Timeout_triggers_rollback_while_busy() { - using (var conn = OpenConnection(ConnectionStringEnlistOff)) + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using (var conn = dataSource.OpenConnection()) { using (new TransactionScope(TransactionScopeOption.Required, TimeSpan.FromSeconds(1))) { @@ -258,15 +303,17 @@ public void Timeout_triggers_rollback_while_busy() } } - AssertNumberOfRows(0); + AssertNumberOfRows(0, tableName); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1579")] public void Schema_connection_should_not_enlist() { + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using var tran = new TransactionScope(); - using var conn = OpenConnection(ConnectionStringEnlistOn); - using var cmd = new NpgsqlCommand("SELECT * FROM data", conn); + using var conn = dataSource.OpenConnection(); + using var cmd = new NpgsqlCommand($"SELECT * FROM {tableName}", conn); using var reader = cmd.ExecuteReader(CommandBehavior.KeyInfo); reader.GetColumnSchema(); AssertNoDistributedIdentifier(); @@ -277,16 +324,14 @@ public void 
Schema_connection_should_not_enlist() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1737")] public void Single_unpooled_connection() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, - Enlist = true - }; - - + csb.Pooling = false; + csb.Enlist = true; + }); using var scope = new TransactionScope(); - using (var conn = OpenConnection(csb)) + using (var conn = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT 1", conn)) cmd.ExecuteNonQuery(); @@ -294,16 +339,36 @@ public void Single_unpooled_connection() } [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3863")] - public void Break_connector_while_in_transaction_scope_with_rollback([Values] bool pooling) + [IssueLink("https://github.com/npgsql/npgsql/issues/4963"), IssueLink("https://github.com/npgsql/npgsql/issues/5783")] + public void Single_closed_connection_in_transaction_scope([Values] bool pooling, [Values] bool multipleHosts) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) + using var dataSource = CreateDataSource(csb => + { + csb.Pooling = pooling; + csb.Enlist = true; + csb.Host = multipleHosts ? "localhost,127.0.0.1" : csb.Host; + }); + + using (var scope = new TransactionScope()) + using (var conn = dataSource.OpenConnection()) + using (var cmd = new NpgsqlCommand("SELECT 1", conn)) { - Pooling = pooling, - }; + cmd.ExecuteNonQuery(); + conn.Close(); + Assert.That(pooling ? dataSource.Statistics.Busy : dataSource.Statistics.Total, Is.EqualTo(1)); + scope.Complete(); + } + + Assert.That(pooling ? 
dataSource.Statistics.Busy : dataSource.Statistics.Total, Is.EqualTo(0)); + } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/3863")] + public void Break_connector_while_in_transaction_scope_with_rollback([Values] bool pooling) + { + using var dataSource = CreateDataSource(csb => csb.Pooling = pooling); using var scope = new TransactionScope(); - var conn = OpenConnection(csb); + var conn = dataSource.OpenConnection(); conn.ExecuteNonQuery("SELECT 1"); conn.Connector!.Break(new Exception(nameof(Break_connector_while_in_transaction_scope_with_rollback))); @@ -313,15 +378,11 @@ public void Break_connector_while_in_transaction_scope_with_rollback([Values] bo [IssueLink("https://github.com/npgsql/npgsql/issues/3863")] public void Break_connector_while_in_transaction_scope_with_commit([Values] bool pooling) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - Pooling = pooling, - }; - + using var dataSource = CreateDataSource(csb => csb.Pooling = pooling); var ex = Assert.Throws(() => { using var scope = new TransactionScope(); - var conn = OpenConnection(csb); + var conn = dataSource.OpenConnection(); conn.ExecuteNonQuery("SELECT 1"); conn.Connector!.Break(new Exception(nameof(Break_connector_while_in_transaction_scope_with_commit))); @@ -337,11 +398,7 @@ public void Break_connector_while_in_transaction_scope_with_commit([Values] bool [IssueLink("https://github.com/npgsql/npgsql/issues/4085")] public void Open_connection_with_enlist_and_aborted_TransactionScope() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Enlist = true - }; - + var dataSource = EnlistOnDataSource; for (var i = 0; i < 2; i++) { using var outerScope = new TransactionScope(); @@ -355,7 +412,7 @@ public void Open_connection_with_enlist_and_aborted_TransactionScope() { } - var ex = Assert.Throws(() => OpenConnection(csb))!; + var ex = Assert.Throws(() => dataSource.OpenConnection())!; Assert.That(ex.Message, Is.EqualTo("The operation is 
not valid for the state of the transaction.")); } } @@ -363,16 +420,18 @@ public void Open_connection_with_enlist_and_aborted_TransactionScope() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1594")] public void Bug1594() { + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using var outerScope = new TransactionScope(); - using (var conn = OpenConnection(ConnectionStringEnlistOn)) + using (var conn = dataSource.OpenConnection()) using (var innerScope1 = new TransactionScope()) { - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"); innerScope1.Complete(); } - using (OpenConnection(ConnectionStringEnlistOn)) + using (dataSource.OpenConnection()) using (new TransactionScope()) { // Don't complete, triggering rollback @@ -386,61 +445,56 @@ void AssertNoPreparedTransactions() int GetNumberOfPreparedTransactions() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT COUNT(*) FROM pg_prepared_xacts WHERE database = @database", conn); cmd.Parameters.Add(new NpgsqlParameter("database", conn.Database)); return (int)(long)cmd.ExecuteScalar()!; } - void AssertNumberOfRows(int expected) - => Assert.That(_controlConn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(expected), "Unexpected data count"); + void AssertNumberOfRows(int expected, string tableName) + { + using var conn = OpenConnection(); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(expected), "Unexpected data count"); + } static void AssertNoDistributedIdentifier() => Assert.That(Transaction.Current?.TransactionInformation.DistributedIdentifier ?? 
Guid.Empty, Is.EqualTo(Guid.Empty), "Distributed identifier found"); - public readonly string ConnectionStringEnlistOn; - public readonly string ConnectionStringEnlistOff; - #endregion Utilities #region Setup - public SystemTransactionTests() - { - ConnectionStringEnlistOn = new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = true }.ToString(); - ConnectionStringEnlistOff = new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = false }.ToString(); - } + NpgsqlDataSource EnlistOnDataSource { get; set; } = default!; - NpgsqlConnection _controlConn = default!; + NpgsqlDataSource EnlistOffDataSource { get; set; } = default!; [OneTimeSetUp] public void OneTimeSetUp() { - _controlConn = OpenConnection(); - - // All tests in this fixture should have exclusive access to the database they're running on. - // If we run these tests in parallel (i.e. two builds in parallel) they will interfere. - // Solve this by taking a PostgreSQL advisory lock for the lifetime of the fixture. - _controlConn.ExecuteNonQuery("SELECT pg_advisory_lock(666)"); - - _controlConn.ExecuteNonQuery("DROP TABLE IF EXISTS data"); - _controlConn.ExecuteNonQuery("CREATE TABLE data (name TEXT)"); + EnlistOnDataSource = CreateDataSource(csb => csb.Enlist = true); + EnlistOffDataSource = CreateDataSource(csb => csb.Enlist = false); } - [SetUp] - public void SetUp() + [OneTimeTearDown] + public void OnTimeTearDown() { - _controlConn.ExecuteNonQuery("TRUNCATE data"); + EnlistOnDataSource?.Dispose(); + EnlistOnDataSource = null!; + EnlistOffDataSource?.Dispose(); + EnlistOffDataSource = null!; } -#pragma warning disable CS8625 - [OneTimeTearDown] - public void OneTimeTearDown() + internal static string CreateTempTable(NpgsqlDataSource dataSource, string columns) { - _controlConn?.Close(); - _controlConn = null; + var tableName = "temp_table" + Interlocked.Increment(ref _tempTableCounter); + dataSource.ExecuteNonQuery(@$" +START TRANSACTION; SELECT pg_advisory_xact_lock(0); +DROP TABLE IF 
EXISTS {tableName} CASCADE; +COMMIT; +CREATE TABLE {tableName} ({columns})"); + return tableName; } -#pragma warning restore CS8625 #endregion } diff --git a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs b/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs deleted file mode 100644 index a3848de442..0000000000 --- a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs +++ /dev/null @@ -1,156 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NUnit.Framework; -using Npgsql.Util; - -namespace Npgsql.Tests; - -public class TaskTimeoutAndCancellationTest : TestBase -{ - const int TestResultValue = 777; - - async Task GetResultTaskAsync(int timeout, CancellationToken ct) - { - await Task.Delay(timeout, ct); - return TestResultValue; - } - - Task GetVoidTaskAsync(int timeout, CancellationToken ct) => Task.Delay(timeout, ct); - - [Test] - public async Task SuccessfulResultTaskAsync() => - Assert.AreEqual(TestResultValue, await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetResultTaskAsync(10, ct), NpgsqlTimeout.Infinite, CancellationToken.None)); - - [Test] - public async Task SuccessfulVoidTaskAsync() => - await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetVoidTaskAsync(10, ct), NpgsqlTimeout.Infinite, CancellationToken.None); - - [Test] - public void InfinitelyLongTaskTimeout() => - Assert.ThrowsAsync(async () => - await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetVoidTaskAsync(Timeout.Infinite, ct), new NpgsqlTimeout(TimeSpan.FromMilliseconds(10)), CancellationToken.None)); - - [Test] - public void InfinitelyLongTaskCancellation() - { - using var cts = new CancellationTokenSource(10); - Assert.ThrowsAsync(async () => - await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetVoidTaskAsync(Timeout.Infinite, ct), NpgsqlTimeout.Infinite, cts.Token)); - } - - /// - /// The test creates a delayed execution Task that is being fake-cancelled and fails subsequently and triggers 'TaskScheduler.UnobservedTaskException event'. 
- /// - /// - /// The test is based on timing and depends on availability of thread pool threads. Therefore it could become unstable if the environment is under pressure. - /// - [Theory, IssueLink("https://github.com/npgsql/npgsql/issues/4149")] - [TestCase("CancelAndTimeout")] - [TestCase("CancelOnly")] - [TestCase("TimeoutOnly")] - [TestCase("CancelAndTimeout")] - [TestCase("CancelOnly")] - [TestCase("TimeoutOnly")] - public Task DelayedFaultedTaskCancellation(string testCase) => RunDelayedFaultedTaskTestAsync(async getUnobservedTaskException => - { - var cancel = true; - var timeout = true; - switch (testCase) - { - case "TimeoutOnly": - cancel = false; - break; - case "CancelOnly": - timeout = false; - break; - } - - var notifyDelayCompleted = new SemaphoreSlim(0, 1); - - // Invoke the method that creates a delayed execution Task that fails subsequently. - await CreateTaskAndPreemptWithCancellationAsync(500, cancel, timeout, notifyDelayCompleted); - - // Wait enough time for the non-cancelable task to notify us that an exception is thrown. - await notifyDelayCompleted.WaitAsync(); - - // And then wait some more. - var repeatCount = 2; - while (getUnobservedTaskException() is null && repeatCount-- > 0) - { - await Task.Delay(100); - - // Run the garbage collector to collect unobserved Tasks. - GC.Collect(); - GC.WaitForPendingFinalizers(); - } - }); - - static async Task RunDelayedFaultedTaskTestAsync(Func, Task> test) - { - Exception? unobservedTaskException = null; - - // Subscribe to UnobservedTaskException event to store the Exception, if any. - void OnUnobservedTaskException(object? source, UnobservedTaskExceptionEventArgs args) - { - if (!args.Observed) - { - args.SetObserved(); - } - unobservedTaskException = args.Exception; - } - TaskScheduler.UnobservedTaskException += OnUnobservedTaskException; - - try - { - await test(() => unobservedTaskException); - - // Verify the unobserved Task exception event has not been received. 
- Assert.IsNull(unobservedTaskException, unobservedTaskException?.Message); - } - finally - { - TaskScheduler.UnobservedTaskException -= OnUnobservedTaskException; - } - } - - /// - /// Create a delayed execution, non-Cancellable Task that fails subsequently after the Task goes out of scope. - /// - static async Task CreateTaskAndPreemptWithCancellationAsync(int delayMs, bool cancel, bool timeout, SemaphoreSlim notifyDelayCompleted) - { - var nonCancellableTask = Task.Delay(delayMs, CancellationToken.None) - .ContinueWith( - async _ => - { - try - { - await Task.FromException(new Exception("Unobserved Task Test Exception")); - } - finally - { - notifyDelayCompleted.Release(); - } - }) - .Unwrap(); - - var timeoutMs = delayMs / 5; - using var cts = cancel ? new CancellationTokenSource(timeoutMs) : null; - try - { - await TaskTimeoutAndCancellation.ExecuteAsync( - _ => nonCancellableTask, - timeout ? new NpgsqlTimeout(TimeSpan.FromMilliseconds(timeoutMs)) : NpgsqlTimeout.Infinite, - cts?.Token ?? CancellationToken.None); - } - catch (TimeoutException) - { - // Expected due to preemptive time out. - } - catch (OperationCanceledException) when (cts?.IsCancellationRequested == true) - { - // Expected due to preemptive cancellation. - } - Assert.False(nonCancellableTask.IsCompleted); - } -} diff --git a/test/Npgsql.Tests/TestMetrics.cs b/test/Npgsql.Tests/TestMetrics.cs index 52bf2ed935..c90f1e484c 100644 --- a/test/Npgsql.Tests/TestMetrics.cs +++ b/test/Npgsql.Tests/TestMetrics.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; namespace Npgsql.Tests; @@ -41,17 +41,13 @@ private TestMetrics(TimeSpan allowedTime, bool reportOnStop) /// Report metrics to stdout when stopped. /// A new running TestMetrics object. public static TestMetrics Start(TimeSpan allowedTime, bool reportOnStop) - { - return new(allowedTime, reportOnStop); - } + => new(allowedTime, reportOnStop); /// - /// Incremnent the Iterations value by one. 
+ /// Increment the Iterations value by one. /// public void IncrementIterations() - { - Iterations++; - } + => Iterations++; /// /// Stop the internal stop watch and record elapsed CPU times. @@ -81,9 +77,7 @@ public void Stop() /// Stop the internal stop watch and record elapsed CPU times. /// public void Dispose() - { - Stop(); - } + => Stop(); /// /// Report whether ElapsedClockTime has met or exceeded the maximum run time. @@ -96,19 +90,15 @@ public void Dispose() /// /// The number of iterations accumulated per the time span provided. public double IterationsPer(TimeSpan timeSpan) - { - return (double)Iterations / ((double)stopwatch.Elapsed.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); - } + => (double)Iterations / ((double)stopwatch.Elapsed.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); /// /// Calculate the number of iterations accumulated per second. - /// Equivelent to calling IterationsPer(new TimeSpan(0, 0, 1)). + /// Equivalent to calling IterationsPer(new TimeSpan(0, 0, 1)). /// /// The number of iterations accumulated per second. public double IterationsPerSecond() - { - return IterationsPer(new TimeSpan(0, 0, 1)); - } + => IterationsPer(new TimeSpan(0, 0, 1)); /// /// Calculate the number of iterations accumulated per the CPU time span provided. @@ -116,20 +106,16 @@ public double IterationsPerSecond() /// /// The number of iterations accumulated per the CPU time span provided. public double IterationsPerCPU(TimeSpan timeSpan) - { - return (double)Iterations / ((double)ElapsedTotalCPUTime.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); - } + => (double)Iterations / ((double)ElapsedTotalCPUTime.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); /// /// Calculate the number of iterations accumulated per CPU second. - /// Equivelent to calling IterationsPerCPU(new TimeSpan(0, 0, 1)). + /// Equivalent to calling IterationsPerCPU(new TimeSpan(0, 0, 1)). /// /// /// The number of iterations accumulated per CPU second. 
public double IterationsPerCPUSecond() - { - return IterationsPerCPU(new TimeSpan(0, 0, 1)); - } + => IterationsPerCPU(new TimeSpan(0, 0, 1)); /// /// Elapsed time since start. @@ -176,4 +162,4 @@ public TimeSpan ElapsedUserCPUTime /// Elapsed total (system + user) CPU time since start. /// public TimeSpan ElapsedTotalCPUTime => ElapsedSystemCPUTime + ElapsedUserCPUTime; -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index ecfdd85ff3..0d5c643225 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.Diagnostics.CodeAnalysis; @@ -19,7 +19,7 @@ public static class TestUtil /// test database. /// public const string DefaultConnectionString = - "Server=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0;SSL Mode=Disable"; + "Host=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0;SSL Mode=Disable"; /// /// The connection string that will be used when opening the connection to the tests database. @@ -57,7 +57,7 @@ public static void MinimumPgVersion(NpgsqlDataSource dataSource, string minVersi MinimumPgVersion(connection, minVersion, ignoreText); } - public static void MinimumPgVersion(NpgsqlConnection conn, string minVersion, string? ignoreText = null) + public static bool MinimumPgVersion(NpgsqlConnection conn, string minVersion, string? ignoreText = null) { var min = new Version(minVersion); if (conn.PostgreSqlVersion < min) @@ -66,7 +66,10 @@ public static void MinimumPgVersion(NpgsqlConnection conn, string minVersion, st if (ignoreText != null) msg += ": " + ignoreText; Assert.Ignore(msg); + return false; } + + return true; } public static void MaximumPgVersionExclusive(NpgsqlConnection conn, string maxVersion, string? 
ignoreText = null) @@ -83,9 +86,12 @@ public static void MaximumPgVersionExclusive(NpgsqlConnection conn, string maxVe static readonly Version MinCreateExtensionVersion = new(9, 1); - public static void IgnoreOnRedshift(NpgsqlConnection conn, string? ignoreText = null) + public static async Task IgnoreOnRedshift(NpgsqlConnection conn, string? ignoreText = null) { - if (new NpgsqlConnectionStringBuilder(conn.ConnectionString).ServerCompatibilityMode == ServerCompatibilityMode.Redshift) + await using var command = conn.CreateCommand(); + command.CommandText = "SELECT version()"; + var version = (string)(await command.ExecuteScalarAsync())!; + if (version.Contains("redshift", StringComparison.OrdinalIgnoreCase)) { var msg = "Test ignored on Redshift"; if (ignoreText != null) @@ -94,9 +100,6 @@ public static void IgnoreOnRedshift(NpgsqlConnection conn, string? ignoreText = } } - public static bool IsPgPrerelease(NpgsqlConnection conn) - => ((string)conn.ExecuteScalar("SELECT version()")!).Contains("beta"); - public static void EnsureExtension(NpgsqlConnection conn, string extension, string? minVersion = null) => EnsureExtension(conn, extension, minVersion, async: false).GetAwaiter().GetResult(); @@ -105,22 +108,26 @@ public static Task EnsureExtensionAsync(NpgsqlConnection conn, string extension, static async Task EnsureExtension(NpgsqlConnection conn, string extension, string? 
minVersion, bool async) { - if (minVersion != null) - MinimumPgVersion(conn, minVersion, $"The extension '{extension}' only works for PostgreSQL {minVersion} and higher."); + if (minVersion != null && !MinimumPgVersion(conn, minVersion, $"The extension '{extension}' only works for PostgreSQL {minVersion} and higher.")) + return; if (conn.PostgreSqlVersion < MinCreateExtensionVersion) Assert.Ignore($"The 'CREATE EXTENSION' command only works for PostgreSQL {MinCreateExtensionVersion} and higher."); - if (async) - await conn.ExecuteNonQueryAsync($"CREATE EXTENSION IF NOT EXISTS {extension}"); - else - conn.ExecuteNonQuery($"CREATE EXTENSION IF NOT EXISTS {extension}"); + try + { + if (async) + await conn.ExecuteNonQueryAsync($"CREATE EXTENSION IF NOT EXISTS {extension}"); + else + conn.ExecuteNonQuery($"CREATE EXTENSION IF NOT EXISTS {extension}"); + } + catch (PostgresException ex) when (ex.ConstraintName == "pg_extension_name_index") + { + // The extension is already installed, but we can race across threads. + // https://stackoverflow.com/questions/63104126/create-extention-if-not-exists-doesnt-really-check-if-extention-does-not-exis + } conn.ReloadTypes(); - - // Multiplexing doesn't really support reloading types, since each connector uses its own connector type mapper when reading, - // which is different from the pool-wise connector mapper (which is used when writing). 
- NpgsqlConnection.ClearPool(conn); } /// @@ -162,12 +169,15 @@ public static async Task EnsurePostgis(NpgsqlConnection conn) { await EnsureExtensionAsync(conn, "postgis"); } - catch (PostgresException e) when (e.SqlState == PostgresErrorCodes.UndefinedFile) + catch (PostgresException) { - // PostGIS packages aren't available for PostgreSQL prereleases - if (IsPgPrerelease(conn)) + if (Environment.GetEnvironmentVariable("NPGSQL_TEST_POSTGIS")?.ToLower(CultureInfo.InvariantCulture) is "1" or "true") + { + throw; + } + else { - Assert.Ignore($"PostGIS could not be installed, but PostgreSQL is prerelease ({conn.ServerVersion}), ignoring test suite."); + Assert.Ignore($"PostGIS isn't installed, skipping tests"); } } } @@ -246,6 +256,17 @@ internal static async Task GetTempViewName(NpgsqlConnection conn) return viewName; } + /// + /// Generates a unique materialized view name, usable for a single test, and drops it if it already exists. + /// Actual creation of the materialized view is the responsibility of the caller. + /// + internal static async Task GetTempMaterializedViewName(NpgsqlConnection conn) + { + var viewName = "temp_materialized_view" + Interlocked.Increment(ref _tempViewCounter); + await conn.ExecuteNonQueryAsync($"DROP MATERIALIZED VIEW IF EXISTS {viewName} CASCADE"); + return viewName; + } + /// /// Generates a unique function name, usable for a single test. /// Actual creation of the function is the responsibility of the caller. 
@@ -370,8 +391,8 @@ internal static IDisposable DisableSqlRewriting() NpgsqlCommand.EnableSqlRewriting = false; return new DeferredExecutionDisposable(() => NpgsqlCommand.EnableSqlRewriting = true); #else - Assert.Ignore("Cannot disable SQL rewriting in RELEASE builds"); - throw new NotSupportedException("Cannot disable SQL rewriting in RELEASE builds"); + Assert.Ignore("Cannot disable SQL rewriting in RELEASE builds"); + throw new NotSupportedException("Cannot disable SQL rewriting in RELEASE builds"); #endif } @@ -402,7 +423,7 @@ internal static void AssertLoggingStateContains( (LogLevel Level, EventId Id, string Message, object? State, Exception? Exception) log, string key, T value) - => Assert.That(log.State, Contains.Item(new KeyValuePair(key, value))); + => Assert.That(log.State as IEnumerable>, Contains.Item(new KeyValuePair(key, value))); internal static void AssertLoggingStateDoesNotContain( (LogLevel Level, EventId Id, string Message, object? State, Exception? Exception) log, @@ -495,13 +516,9 @@ public static void WaitUntilCommandIsInProgress(this NpgsqlCommand command) /// test reproduces the issue) /// [AttributeUsage(AttributeTargets.Method, AllowMultiple = true)] -public class IssueLink : Attribute +public class IssueLink(string linkAddress) : Attribute { - public string LinkAddress { get; private set; } - public IssueLink(string linkAddress) - { - LinkAddress = linkAddress; - } + public string LinkAddress { get; private set; } = linkAddress; } public enum PrepareOrNot @@ -515,20 +532,3 @@ public enum PooledOrNot Pooled, Unpooled } - -#if NETSTANDARD2_0 - static class QueueExtensions - { - public static bool TryDequeue(this Queue queue, out T result) - { - if (queue.Count == 0) - { - result = default; - return false; - } - - result = queue.Dequeue(); - return true; - } - } -#endif diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs new file mode 100644 index 0000000000..f9241c2a51 --- /dev/null +++ 
b/test/Npgsql.Tests/TracingTests.cs @@ -0,0 +1,855 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using NpgsqlTypes; +using NUnit.Framework; +using static Npgsql.Tests.TestUtil; + +namespace Npgsql.Tests; + +[NonParallelizable] +[TestFixture(true)] +[TestFixture(false)] +public class TracingTests(bool async) : TestBase +{ + #region Physical open + + [Test] + public async Task PhysicalOpen() + { + using var activityListener = StartListener(out var activities); + await using var dataSource = CreateDataSource(); + await using var connection = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + + Assert.That(activities, Has.Count.EqualTo(1)); + + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo("CONNECT " + connection.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo("CONNECT " + connection.Settings.Database)); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Unset)); + + Assert.That(activity.Events.Count(), Is.EqualTo(0)); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 5 : 6)); + + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags, Does.Not.ContainKey("db.query.text")); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task PhysicalOpen_error() + { + using var activityListener = StartListener(out var activities); + await using var dataSource = CreateDataSource(x => x.Host = "not-existing-host"); + var exception = Assert.ThrowsAsync(async () => + { + await using var connection = async + ? 
await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + })!; + + var activity = GetSingleActivity(activities, "CONNECT " + dataSource.Settings.Database, "CONNECT " + dataSource.Settings.Database, ActivityStatusCode.Error, exception.Message); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo(exception.GetType().FullName)); + Assert.That(exceptionTags["exception.message"], Does.Contain(exception.Message)); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain(exception.Message)); + + var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(activityTags, Has.Count.EqualTo(3)); + + Assert.That(activityTags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(dataSource.ConnectionString)); + + Assert.That(activityTags["error.type"], Is.EqualTo("System.Net.Sockets.SocketException")); + } + + [Test] + public async Task PhysicalOpen_disable() + { + using var activityListener = StartListener(out var activities); + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConfigureTracing(options => options.EnablePhysicalOpenTracing(enable: false)); + await using var dataSource = dataSourceBuilder.Build(); + + await using var connection = async ? 
await dataSource.OpenConnectionAsync() : dataSource.OpenConnection(); + + Assert.That(activities, Is.Empty); + } + + #endregion Physical open + + #region Command execution + + [Test] + public async Task CommandExecute([Values] bool batch) + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.Name = "TestTracingDataSource"; + dataSourceBuilder.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false)); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, batch, "SELECT 42"); + + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var firstResponseEvent = activity.Events.First(); + Assert.That(firstResponseEvent.Name, Is.EqualTo("received-first-response")); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
6 : 7)); + + Assert.That(tags["db.query.text"], Is.EqualTo("SELECT 42")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo("TestTracingDataSource")); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task CommandExecute_error([Values] bool batch) + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + Assert.ThrowsAsync(async () => await ExecuteScalar(connection, async, batch, "SELECT * FROM non_existing_table")); + + var activity = GetSingleActivity(activities, "postgresql", "postgresql", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + + var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(activityTags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
8 : 9)); + + Assert.That(activityTags["db.query.text"], Is.EqualTo("SELECT * FROM non_existing_table")); + Assert.That(activityTags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(activityTags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(activityTags["db.response.status_code"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + Assert.That(activityTags["error.type"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + + Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task CommandExecute_explicit_prepare([Values] bool batch) + { + await using var dataSource = CreateDataSource(o => o.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, batch, "SELECT 42", prepare: false); + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Does.Not.ContainKey("db.npgsql.prepared")); + + activities.Clear(); + await ExecuteScalar(connection, async, batch, "SELECT 42", prepare: true); + activity = GetSingleActivity(activities, "postgresql", "postgresql"); + tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["db.npgsql.prepared"], Is.True); + } + + [Test] + public async Task CommandExecute_auto_prepare([Values] bool batch) + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.MaxPoolSize = 1; + dataSourceBuilder.ConnectionStringBuilder.MaxAutoPrepare = 10; + dataSourceBuilder.ConnectionStringBuilder.AutoPrepareMinUsages = 2; + dataSourceBuilder.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false)); + 
await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, batch, "SELECT 42"); + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Does.Not.ContainKey("db.npgsql.prepared")); + + activities.Clear(); + await ExecuteScalar(connection, async, batch, "SELECT 42"); + activity = GetSingleActivity(activities, "postgresql", "postgresql"); + tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["db.npgsql.prepared"], Is.True); + } + + [Test] + public async Task CommandExecute_ConfigureTracing([Values] bool batch) + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConfigureTracing(options => + { + options + .EnablePhysicalOpenTracing(false) + .EnableFirstResponseEvent(enable: false) + .ConfigureCommandFilter(cmd => cmd.CommandText.Contains('2')) + .ConfigureBatchFilter(batch => batch.BatchCommands[0].CommandText.Contains('2')) + .ConfigureCommandSpanNameProvider(_ => "unknown_query") + .ConfigureBatchSpanNameProvider(_ => "unknown_query") + .ConfigureCommandEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")) + .ConfigureBatchEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")); + }); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, batch, "SELECT 1"); + + Assert.That(activities, Is.Empty); + + await ExecuteScalar(connection, async, batch, "SELECT 2"); + + var activity = GetSingleActivity(activities, "unknown_query", "unknown_query"); + + Assert.That(activity.Events.Count(), 
Is.EqualTo(0)); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["custom_tag"], Is.EqualTo("custom_value")); + } + + #endregion Command execution + + #region Binary import + + [Test] + public async Task BinaryImport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) FROM STDIN BINARY"; + + if (async) + { + await using var writer = await connection.BeginBinaryImportAsync(copyFromCommand); + + await writer.StartRowAsync(); + await writer.WriteAsync("Hello"); + await writer.WriteAsync((short)8, NpgsqlDbType.Smallint); + + await writer.CompleteAsync(); + } + else + { + using var writer = connection.BeginBinaryImport(copyFromCommand); + + writer.StartRow(); + writer.Write("Hello"); + writer.Write((short)8, NpgsqlDbType.Smallint); + + writer.Complete(); + } + + var activity = GetSingleActivity(activities, "COPY FROM"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
8 : 9)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyFromCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY FROM")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(tags["db.npgsql.rows"], Is.EqualTo(1)); + + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task BinaryImport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) FROM STDIN BINARY"; + + if (async) + { + await using var writer = await connection.BeginBinaryImportAsync(copyFromCommand); + await writer.StartRowAsync(); + await writer.WriteAsync("Hello"); + await writer.WriteAsync((short)8, NpgsqlDbType.Smallint); + // No Complete() call - disposing cancels + } + else + { + using var writer = connection.BeginBinaryImport(copyFromCommand); + writer.StartRow(); + writer.Write("Hello"); + writer.Write((short)8, NpgsqlDbType.Smallint); + // No Complete() call - disposing cancels + } + + _ = GetSingleActivity(activities, "COPY FROM"); + } + + [Test] + public async Task BinaryImport_error() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY non_existing_table (field_text, field_int2) FROM STDIN BINARY"; + + var ex = 
Assert.ThrowsAsync(async () => + { + await using var writer = async + ? await connection.BeginBinaryImportAsync(copyFromCommand) + : connection.BeginBinaryImport(copyFromCommand); + }); + + var activity = GetSingleActivity(activities, "COPY FROM", "COPY FROM", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + } + + #endregion Binary import + + #region Binary export + + [Test] + public async Task BinaryExport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + await connection.ExecuteNonQueryAsync($"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"); + + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY {table} (field_text, field_int2) TO STDOUT BINARY"; + + if (async) + { + await using var reader = await connection.BeginBinaryExportAsync(copyToCommand); + while (await reader.StartRowAsync() != -1) + { + _ = await reader.ReadAsync(); + _ = await reader.ReadAsync(); + } + } + else + { + using var reader = connection.BeginBinaryExport(copyToCommand); + while (reader.StartRow() != -1) + { + _ = reader.Read(); + _ = reader.Read(); + } + } + + var 
activity = GetSingleActivity(activities, "COPY TO"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 8 : 9)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyToCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY TO")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(tags["db.npgsql.rows"], Is.EqualTo(1)); + + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task BinaryExport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var conn = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + // This must be large enough to cause Postgres to queue up CopyData messages. 
+ const string copyToCommand = "COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"; + + if (async) + { + await using var exporter = await conn.BeginBinaryExportAsync(copyToCommand); + await exporter.StartRowAsync(); + await exporter.ReadAsync(); + await exporter.CancelAsync(); + } + else + { + using var exporter = await conn.BeginBinaryExportAsync(copyToCommand); + exporter.StartRow(); + exporter.Read(); + exporter.Cancel(); + } + + _ = GetSingleActivity(activities, "COPY TO"); + } + + [Test] + public async Task BinaryExport_error() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY non_existing_table (field_text, field_int2) TO STDOUT BINARY"; + var ex = Assert.ThrowsAsync(async () => + { + await using var reader = async + ? 
await connection.BeginBinaryExportAsync(copyToCommand) + : connection.BeginBinaryExport(copyToCommand); + }); + + var activity = GetSingleActivity(activities, "COPY TO", "COPY TO", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + } + + #endregion Binary export + + #region Raw binary + + [Test] + public async Task RawBinaryExport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + await connection.ExecuteNonQueryAsync($"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"); + + using var activityListener = StartListener(out var activities); + + // Raw binary export + var copyToCommand = $"COPY {table} (field_text, field_int2) TO STDIN BINARY"; + var buffer = new byte[1024]; + if (async) + { + await using var stream = await connection.BeginRawBinaryCopyAsync(copyToCommand); + while (await stream.ReadAsync(buffer, 0, buffer.Length) > 0) { } + } + else + { + using var stream = connection.BeginRawBinaryCopy(copyToCommand); + while (stream.Read(buffer, 0, buffer.Length) > 0) { } + } + + var activity = GetSingleActivity(activities, "COPY"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t 
=> t.Value); + + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 7 : 8)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyToCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY TO")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + + Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); + } + + [Test] + public async Task RawBinaryExport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + await connection.ExecuteNonQueryAsync($"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"); + + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY {table} (field_text, field_int2) TO STDIN BINARY"; + var buffer = new byte[1024]; + if (async) + { + await using var stream = await connection.BeginRawBinaryCopyAsync(copyToCommand); + var _ = await stream.ReadAsync(buffer, 0, buffer.Length); + await stream.CancelAsync(); + } + else + { + using var stream = connection.BeginRawBinaryCopy(copyToCommand); + var _ = stream.Read(buffer, 0, buffer.Length); + stream.Cancel(); + } + + _ = GetSingleActivity(activities, "COPY"); + } + + [Test] + public async Task RawBinaryImport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var 
activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY {table} (field_text, field_int2) FROM STDIN BINARY"; + byte[] garbage = [1, 2, 3, 4]; + if (async) + { + await using var stream = await connection.BeginRawBinaryCopyAsync(copyToCommand); + await stream.WriteAsync(garbage); + await stream.FlushAsync(); + await stream.CancelAsync(); + } + else + { + using var stream = connection.BeginRawBinaryCopy(copyToCommand); + stream.Write(garbage); + stream.Flush(); + stream.Cancel(); + } + + _ = GetSingleActivity(activities, "COPY"); + } + + [Test] + public async Task RawBinaryImport_error() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY non_existing_table (field_text, field_int2) FROM STDIN BINARY"; + var ex = Assert.ThrowsAsync(async () => + { + await using var stream = async + ? 
await connection.BeginRawBinaryCopyAsync(copyFromCommand) + : connection.BeginRawBinaryCopy(copyFromCommand); + }); + + var activity = GetSingleActivity(activities, "COPY", "COPY", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + } + + #endregion Raw binary + + #region Text COPY + + [Test] + public async Task TextImport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) FROM STDIN"; + + if (async) + { + await using var writer = await connection.BeginTextImportAsync(copyFromCommand); + await writer.WriteAsync("Hello\t8\n"); + } + else + { + using var writer = connection.BeginTextImport(copyFromCommand); + writer.Write("Hello\t8\n"); + } + + var activity = GetSingleActivity(activities, "COPY FROM"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
7 : 8)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyFromCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY FROM")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + + Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); + } + + [Test] + public async Task TextExport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + var insertCmd = $"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"; + await connection.ExecuteNonQueryAsync(insertCmd); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) TO STDIN"; + + var chars = new char[30]; + if (async) + { + await using var reader = await connection.BeginTextExportAsync(copyFromCommand); + _ = await reader.ReadAsync(chars); + } + else + { + using var reader = connection.BeginTextExport(copyFromCommand); + _ = reader.Read(chars); + } + + var activity = GetSingleActivity(activities, "COPY TO"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
7 : 8)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyFromCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY TO")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + + Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); + } + + // Text COPY is implemented over NpgsqlRawCopyStream internally, without any additional tracing-related logic. + // So we do only basic direct coverage and depend on the general raw tests for the rest. + + #endregion Text COPY + + // All ConfigureTracing() aspects of COPY are implemented in a single code path for all COPY paths, so we test just one. + + [Test] + public async Task Copy_ConfigureTracing() + { + await using var dataSource = CreateDataSource(builder => builder.ConfigureTracing(options => + options + .EnablePhysicalOpenTracing(false) + .ConfigureCopyOperationFilter(command => command.Contains("filter_in")) + .ConfigureCopyOperationSpanNameProvider(_ => "custom_binary_import") + .ConfigureCopyOperationEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")))); + + await using var conn = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(conn, "field_text TEXT, field_int_filter_in SMALLINT"); + var copyCommand = $"COPY {table} (field_text, field_int_filter_in) FROM STDIN BINARY"; + + var filteredOutTable = await CreateTempTable(conn, "field_text TEXT, field_int_filter_out SMALLINT"); + var filteredOutCopyCommand = $"COPY {filteredOutTable} (field_text, field_int_filter_out) FROM STDIN BINARY"; + + using var activityListener = StartListener(out var activities); + + + if (async) + { + await using (var writer = await conn.BeginBinaryImportAsync(copyCommand)) + { + await 
writer.CompleteAsync(); + } + + await using (var writer = await conn.BeginBinaryImportAsync(filteredOutCopyCommand)) + { + await writer.CompleteAsync(); + } + } + else + { + using (var writer = conn.BeginBinaryImport(copyCommand)) + { + writer.Complete(); + } + + using (var writer = conn.BeginBinaryImport(filteredOutCopyCommand)) + { + writer.Complete(); + } + } + + // There should be just one activity since one of the two COPY commands is filtered out + var activity = GetSingleActivity(activities, "custom_binary_import", "custom_binary_import"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["custom_tag"], Is.EqualTo("custom_value")); + } + + [Test] + public async Task Password_does_not_leak_via_datasource_name([Values] bool persistSecurityInfo) + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.PersistSecurityInfo = persistSecurityInfo; + // Do not set the data source name - this makes it default to the connection string, but without + // the password (even when Persist Security Info is true) + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, isBatch: false, query: "SELECT 42"); + + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + var connectionString = new NpgsqlConnectionStringBuilder((string)tags["db.npgsql.data_source"]!); + Assert.That(connectionString.Password, Is.Null); + } + + static ActivityListener StartListener(out List activities) + { + var a = new List(); + + var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => a.Add(activity) 
+ }; + ActivitySource.AddActivityListener(activityListener); + + activities = a; + return activityListener; + } + + static Activity GetSingleActivity( + List activities, + string? expectedDisplayName, + string? expectedOperationName = null, + ActivityStatusCode? expectedStatusCode = null, + string? expectedStatusDescription = null) + { + Assert.That(activities, Has.Count.EqualTo(1)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo(expectedDisplayName)); + Assert.That(activity.OperationName, Is.EqualTo(expectedOperationName ?? expectedDisplayName)); + Assert.That(activity.Status, Is.EqualTo(expectedStatusCode ?? ActivityStatusCode.Unset)); + Assert.That(activity.StatusDescription, Is.EqualTo(expectedStatusDescription)); + + return activity; + } + + static async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query, bool prepare = false) + { + if (isBatch) + { + await using var batch = connection.CreateBatch(); + var batchCommand = batch.CreateBatchCommand(); + batchCommand.CommandText = query; + batch.BatchCommands.Add(batchCommand); + + if (prepare) + { + if (async) + await batch.PrepareAsync(); + else + batch.Prepare(); + } + + if (async) + return await batch.ExecuteScalarAsync(); + else + return batch.ExecuteScalar(); + } + else + { + await using var command = connection.CreateCommand(); + command.CommandText = query; + + if (prepare) + { + if (async) + await command.PrepareAsync(); + else + command.Prepare(); + } + + if (async) + return await command.ExecuteScalarAsync(); + else + return command.ExecuteScalar(); + } + } +} diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index 87b963d65d..ab94837a95 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using Npgsql.Internal; @@ -12,14 +12,11 @@ namespace Npgsql.Tests; 
-public class TransactionTests : MultiplexingTestBase +public class TransactionTests : TestBase { - [Test, Description("Basic insert within a commited transaction")] + [Test, Description("Basic insert within a committed transaction")] public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -37,18 +34,12 @@ public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(1)); } - // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException - // Because disposed NpgsqlTransaction might be reused by another thread - if (!IsMultiplexing) - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } - [Test, Description("Basic insert within a commited transaction")] + [Test, Description("Basic insert within a committed transaction")] public async Task CommitAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -72,9 +63,6 @@ public async Task CommitAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Pre [Test, Description("Basic insert within a rolled back transaction")] public async Task Rollback([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -98,9 +86,6 @@ public async Task 
Rollback([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepar [Test, Description("Basic insert within a rolled back transaction")] public async Task RollbackAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -180,13 +165,13 @@ public async Task Empty_rollback() [Test, Description("Disposes an empty transaction")] public async Task Empty_Dispose() { - using var _ = CreateTempPool(ConnectionString, out var connString); + await using var dataSource = CreateDataSource(); - using (var conn = await OpenConnectionAsync(connString)) + using (var conn = await dataSource.OpenConnectionAsync()) using (conn.BeginTransaction()) { } - using (var conn = await OpenConnectionAsync(connString)) + using (var conn = await dataSource.OpenConnectionAsync()) { // Make sure the pending BEGIN TRANSACTION didn't leak from the previous open Assert.That(async () => await conn.ExecuteNonQueryAsync("SAVEPOINT foo"), @@ -240,21 +225,12 @@ public async Task Default_IsolationLevel() tx.Rollback(); } - [Test, Description("Makes sure that transactions started in SQL work, except in multiplexing")] + [Test, Description("Makes sure that transactions started in SQL work")] public async Task Via_sql() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: not implemented"); - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); - if (IsMultiplexing) - { - Assert.That(async () => await conn.ExecuteNonQueryAsync("BEGIN"), Throws.Exception.TypeOf()); - return; - } - await conn.ExecuteNonQueryAsync("BEGIN"); await conn.ExecuteNonQueryAsync($"INSERT INTO {table} (name) VALUES ('X')"); await conn.ExecuteNonQueryAsync("ROLLBACK"); @@ -325,12 +301,9 @@ public async Task Failed_transaction_cannot_rollback_to_savepoint_with_custom_ti 
[IssueLink("https://github.com/npgsql/npgsql/issues/719")] public async Task Failed_transaction_on_close_with_custom_timeout() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Pooling = true - }.ToString(); + await using var dataSource = CreateDataSource(csb => csb.Pooling = true); - await using var conn = await OpenConnectionAsync(connString); + await using var conn = await dataSource.OpenConnectionAsync(); conn.BeginTransaction(); var backendProcessId = conn.ProcessID; @@ -350,9 +323,6 @@ public async Task Failed_transaction_on_close_with_custom_timeout() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/555")] public async Task Transaction_on_recycled_connection() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); - // Use application name to make sure we have our very own private connection pool await using var conn = new NpgsqlConnection(ConnectionString + $";Application Name={GetUniqueIdentifier(nameof(Transaction_on_recycled_connection))}"); conn.Open(); @@ -427,8 +397,8 @@ public async Task Savepoint_quoted() public async Task Savepoint_prepends() { await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); using var tx = conn.BeginTransaction(); @@ -500,18 +470,18 @@ public async Task IsCompleted_rollback_failed() [Parallelizable(ParallelScope.None)] public async Task Transaction_not_supported() { - if (IsMultiplexing) - Assert.Ignore("Need to rethink/redo dummy transaction mode"); + // TODO: rewrite to DataSource var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - ApplicationName = 
nameof(Transaction_not_supported) + IsMultiplexing + ApplicationName = nameof(Transaction_not_supported) }.ToString(); NpgsqlDatabaseInfo.RegisterFactory(new NoTransactionDatabaseInfoFactory()); try { - using var conn = await OpenConnectionAsync(connString); + using var conn = new NpgsqlConnection(connString); + await conn.OpenAsync(); using var tx = conn.BeginTransaction(); // Detect that we're not really in a transaction @@ -527,59 +497,24 @@ public async Task Transaction_not_supported() NpgsqlDatabaseInfo.ResetFactories(); } - using (var conn = await OpenConnectionAsync(connString)) + using (var conn = new NpgsqlConnection(connString)) { + await conn.OpenAsync(); NpgsqlConnection.ClearPool(conn); conn.ReloadTypes(); } // Check that everything is back to normal - using (var conn = await OpenConnectionAsync(connString)) - using (var tx = conn.BeginTransaction()) + using (var conn = new NpgsqlConnection(connString)) { - var prevTxId = conn.ExecuteScalar("SELECT txid_current()"); - var nextTxId = conn.ExecuteScalar("SELECT txid_current()"); - Assert.That(nextTxId, Is.EqualTo(prevTxId)); - } - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3248")] - // More at #3254 - public async Task Bug3248_Dispose_transaction_Rollback() - { - if (!IsMultiplexing) - return; - - using var conn = await OpenConnectionAsync(); - await using (var tx = conn.BeginTransaction()) - { - Assert.That(conn.Connector, Is.Not.Null); - Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM \"unknown_table\"", tx: tx), - Throws.Exception.TypeOf()); - Assert.That(conn.Connector, Is.Not.Null); + await conn.OpenAsync(); + using (var tx = conn.BeginTransaction()) + { + var prevTxId = conn.ExecuteScalar("SELECT txid_current()"); + var nextTxId = conn.ExecuteScalar("SELECT txid_current()"); + Assert.That(nextTxId, Is.EqualTo(prevTxId)); + } } - - Assert.That(conn.Connector, Is.Null); - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3248")] - // More 
at #3254 - public async Task Bug3248_Dispose_connection_Rollback() - { - if (!IsMultiplexing) - return; - - var conn = await OpenConnectionAsync(); - var tx = conn.BeginTransaction(); - Assert.That(conn.Connector, Is.Not.Null); - Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM \"unknown_table\"", tx: tx), - Throws.Exception.TypeOf()); - Assert.That(conn.Connector, Is.Not.Null); - - await conn.DisposeAsync(); - Assert.That(conn.Connector, Is.Null); } [Test] @@ -618,17 +553,16 @@ public async Task Access_connection_on_completed_transaction() [Test] public async Task Unbound_transaction_reuse() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MinPoolSize = 1, - MaxPoolSize = 1, - }; - using var __ = CreateTempPool(csb.ToString(), out var connectionString); + csb.MinPoolSize = 1; + csb.MaxPoolSize = 1; + }); await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); - await using var conn1 = await OpenConnectionAsync(connectionString); + await using var conn1 = await dataSource.OpenConnectionAsync(); var tx1 = conn1.BeginTransaction(); await using (var ___ = tx1) { @@ -645,7 +579,7 @@ public async Task Unbound_transaction_reuse() await conn1.CloseAsync(); } - await using var conn2 = await OpenConnectionAsync(connectionString); + await using var conn2 = await dataSource.OpenConnectionAsync(); var tx2 = conn2.BeginTransaction(); await using (var ___ = tx2) { @@ -663,7 +597,7 @@ public async Task Unbound_transaction_reuse() await conn2.CloseAsync(); } - await using var conn3 = await OpenConnectionAsync(connectionString); + await using var conn3 = await dataSource.OpenConnectionAsync(); var tx3 = conn3.BeginTransaction(); await using (var ___ = tx3) { @@ -685,15 +619,8 @@ public async Task Unbound_transaction_reuse() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3686")] public async Task Bug3686() { - if 
(IsMultiplexing) - return; - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Pooling = false - }; - - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); await conn.ExecuteNonQueryAsync("SELECT 1", tx); await tx.CommitAsync(); @@ -739,6 +666,4 @@ public void Bug184_Rollback_fails_on_aborted_transaction() t.Rollback(); } } - - public TransactionTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 15be807767..c06526bdba 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -1,85 +1,21 @@ -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; +using Npgsql.Internal; using NUnit.Framework; using System; +using System.Data; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; using System.Threading.Tasks; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.TypeMapping; +using NpgsqlTypes; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; public class TypeMapperTests : TestBase { -#pragma warning disable CS0618 // GlobalTypeMapper is obsolete - [Test, NonParallelizable] - public async Task Global_mapping() - { - await using var adminConnection = await OpenConnectionAsync(); - var type = await GetTempTypeName(adminConnection); - NpgsqlConnection.GlobalTypeMapper.MapEnum(type); - - try - { - await using var dataSource1 = CreateDataSource(); - - await using (var connection = await dataSource1.OpenConnectionAsync()) - { - await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); - await connection.ReloadTypesAsync(); - - await 
AssertType(connection, Mood.Happy, "happy", type, npgsqlDbType: null); - } - - NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); - - // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); - - // But they do affect on new data sources - await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); - } - finally - { - NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); - } - } - - [Test, NonParallelizable] - public async Task Global_mapping_reset() - { - await using var adminConnection = await OpenConnectionAsync(); - var type = await GetTempTypeName(adminConnection); - NpgsqlConnection.GlobalTypeMapper.MapEnum(type); - - try - { - await using var dataSource1 = CreateDataSource(); - - await using (var connection = await dataSource1.OpenConnectionAsync()) - { - await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); - await connection.ReloadTypesAsync(); - } - - // A global mapping change has no effects on data sources which have already been built - NpgsqlConnection.GlobalTypeMapper.Reset(); - - // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); - - // But they do affect on new data sources - await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); - } - finally - { - NpgsqlConnection.GlobalTypeMapper.Reset(); - } - } -#pragma warning restore CS0618 // GlobalTypeMapper is obsolete - [Test] public async Task ReloadTypes_across_connections_in_data_source() { @@ -89,7 +25,7 @@ public async Task ReloadTypes_across_connections_in_data_source() // via the data source. 
var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.MapEnum(); + dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); await using var connection1 = await dataSource.OpenConnectionAsync(); await using var connection2 = await dataSource.OpenConnectionAsync(); @@ -99,8 +35,8 @@ public async Task ReloadTypes_across_connections_in_data_source() // The data source type mapper has been replaced and connection1 should have the new mapper, but connection2 should retain the older // type mapper - where there's no mapping - as long as it's still open + Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); Assert.DoesNotThrowAsync(async () => await connection1.ExecuteScalarAsync($"SELECT 'happy'::{type}")); - Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); // Close connection2 and reopen to make sure it picks up the new type and mapping from the data source var connId = connection2.ProcessID; @@ -112,14 +48,13 @@ public async Task ReloadTypes_across_connections_in_data_source() } [Test] - [NonParallelizable] // Depends on citext which could be dropped concurrently public async Task String_to_citext() { await using var adminConnection = await OpenConnectionAsync(); await EnsureExtensionAsync(adminConnection, "citext"); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.AddTypeResolverFactory(new CitextToStringTypeHandlerResolverFactory()); + dataSourceBuilder.AddTypeInfoResolverFactory(new CitextToStringTypeHandlerResolverFactory()); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -128,8 +63,97 @@ public async Task String_to_citext() Assert.That(command.ExecuteScalar(), Is.True); } + [Test] + public async Task String_to_citext_with_db_type_string() + { + await using var adminConnection = await OpenConnectionAsync(); + await 
EnsureExtensionAsync(adminConnection, "citext"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + ((INpgsqlTypeMapper)dataSourceBuilder).AddDbTypeResolverFactory(new ForceStringToCitextResolverFactory()); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var command = new NpgsqlCommand("SELECT @p = 'hello'::citext", connection); + var parameter = new NpgsqlParameter("p", DbType.String) + { + Value = "HeLLo" + }; + command.Parameters.Add(parameter); + + Assert.That(command.ExecuteScalar(), Is.True); + Assert.That(parameter.DbType, Is.EqualTo(DbType.String)); + Assert.That(parameter.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Citext)); + Assert.That(parameter.DataTypeName, Is.EqualTo("citext")); + } + + [Test] + public async Task Guid_to_custom_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.AddTypeInfoResolverFactory(new GuidTextConverterFactory(type)); + ((INpgsqlTypeMapper)dataSourceBuilder).AddDbTypeResolverFactory(new GuidTextDbTypeResolverFactory(type)); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type}"); + await connection.ExecuteNonQueryAsync($""" + -- Input: cstring -> Custom type + CREATE FUNCTION {type}_in(cstring) + RETURNS {type} + AS 'textin' + LANGUAGE internal IMMUTABLE STRICT; + + -- Output: Custom type -> cstring + CREATE FUNCTION {type}_out({type}) + RETURNS cstring + AS 'textout' + LANGUAGE internal IMMUTABLE STRICT; + + -- 3️⃣ Create wrappers for binary I/O + CREATE FUNCTION {type}_recv(internal) + RETURNS {type} + AS 'textrecv' + LANGUAGE internal IMMUTABLE STRICT; + + CREATE FUNCTION {type}_send({type}) + RETURNS bytea + AS 'textsend' + LANGUAGE internal 
IMMUTABLE STRICT; + """); + + await connection.ExecuteNonQueryAsync($""" + CREATE TYPE {type} ( + internallength = variable, + input = {type}_in, + output = {type}_out, + receive = {type}_recv, + send = {type}_send, + alignment = int4 + ); + CREATE CAST ({type} AS text) WITH INOUT AS IMPLICIT; + """); + await connection.ReloadTypesAsync(); + + var guid = Guid.NewGuid(); + await using var command = new NpgsqlCommand($"SELECT @p::text = '{guid}'", connection); + var parameter = new NpgsqlParameter("p", DbType.Guid) + { + Value = guid + }; + command.Parameters.Add(parameter); + + Assert.That(command.ExecuteScalar(), Is.True); + Assert.That(parameter.DbType, Is.EqualTo(DbType.Guid)); + Assert.That(parameter.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown)); + Assert.That(parameter.DataTypeName, Is.EqualTo(type)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4582")] - [NonParallelizable] // Drops extension + [NonParallelizable] // Drops global citext extension. public async Task Type_in_non_default_schema() { await using var conn = await OpenConnectionAsync(); @@ -160,31 +184,110 @@ await conn.ExecuteNonQueryAsync(@$" #region Support - class CitextToStringTypeHandlerResolverFactory : TypeHandlerResolverFactory + class CitextToStringTypeHandlerResolverFactory : PgTypeInfoResolverFactory { - public override TypeHandlerResolver Create(NpgsqlConnector connector) - => new CitextToStringTypeHandlerResolver(connector); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; - public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); - public override string GetDataTypeNameByClrType(Type clrType) => throw new NotSupportedException(); - public override string GetDataTypeNameByValueDependentValue(object value) => throw new NotSupportedException(); + sealed class Resolver : IPgTypeInfoResolver + { + public PgTypeInfo? 
GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (type == typeof(string) || dataTypeName?.UnqualifiedName == "citext") + if (options.DatabaseInfo.TryGetPostgresTypeByName("citext", out var pgType)) + return new PgConcreteTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); - class CitextToStringTypeHandlerResolver : TypeHandlerResolver + return null; + } + } + + } + + class ForceStringToCitextResolverFactory : DbTypeResolverFactory + { + public override IDbTypeResolver CreateDbTypeResolver(NpgsqlDatabaseInfo databaseInfo) => new DbTypeResolver(); + + sealed class DbTypeResolver : IDbTypeResolver { - readonly NpgsqlConnector _connector; - readonly PostgresType _pgCitextType; + public string? GetDataTypeName(DbType dbType, Type? type) + { + if (dbType == DbType.String) + return "citext"; + + return null; + } - public CitextToStringTypeHandlerResolver(NpgsqlConnector connector) + public DbType? GetDbType(DataTypeName dataTypeName) { - _connector = connector; - _pgCitextType = connector.DatabaseInfo.GetPostgresTypeByName("citext"); + if (dataTypeName.UnqualifiedName == "citext") + return DbType.String; + + return null; } + } + } - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => type == typeof(string) ? new TextHandler(_pgCitextType, _connector.TextEncoding) : null; - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => null; + class GuidTextConverterFactory(string typeName) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + public override IPgTypeInfoResolver CreateResolver() => new GuidTextTypeInfoResolver(typeName); + + sealed class GuidTextTypeInfoResolver(string typeName) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (type == typeof(Guid) || dataTypeName?.UnqualifiedName == typeName) + if (options.DatabaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) + return new PgConcreteTypeInfo(options, new GuidTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + + return null; + } + } + + sealed class GuidTextConverter(Encoding encoding) : PgStreamingConverter + { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + public override Guid Read(PgReader reader) + => Guid.Parse(encoding.GetString(reader.ReadBytes(reader.CurrentRemaining))); + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Guid.Parse(encoding.GetString(await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false))); - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); + public override Size GetSize(SizeContext context, Guid value, ref object? writeState) + => TextConverterHelpers.GetSize(ref context, value.ToString().AsMemory(), encoding); + + public override void Write(PgWriter writer, Guid value) + => writer.WriteChars(value.ToString().AsSpan(), encoding); + + public override ValueTask WriteAsync(PgWriter writer, Guid value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(value.ToString().AsMemory(), encoding, cancellationToken); + } + } + + class GuidTextDbTypeResolverFactory(string typeName) : DbTypeResolverFactory + { + public override IDbTypeResolver CreateDbTypeResolver(NpgsqlDatabaseInfo databaseInfo) => new DbTypeResolver(typeName); + + sealed class DbTypeResolver(string typeName) : IDbTypeResolver + { + public string? GetDataTypeName(DbType dbType, Type? 
type) + { + if (dbType == DbType.Guid) + return typeName; + return null; + } + + public DbType? GetDbType(DataTypeName dataTypeName) + { + if (dataTypeName == typeName) + return DbType.Guid; + return null; + } } } diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 8a048067d0..538e3f0cc2 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -1,162 +1,76 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; +using System.Collections.Immutable; using System.Data; -using System.Linq; +using System.Diagnostics; using System.Text; using System.Threading.Tasks; -using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.Converters; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; +// ReSharper disable BitwiseOperatorOnEnumWithoutFlags + /// /// Tests on PostgreSQL arrays /// /// /// https://www.postgresql.org/docs/current/static/arrays.html /// -public class ArrayTests : MultiplexingTestBase +public class ArrayTests : TestBase { - [Test, Description("Resolves an array type handler via the different pathways")] - public async Task Array_resolution() - { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Array_resolution), // Prevent backend type caching in TypeHandlerRegistry - Pooling = false - }; - - using var conn = await OpenConnectionAsync(csb); - - // Resolve type by NpgsqlDbType - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.AddWithValue("p", NpgsqlDbType.Array | NpgsqlDbType.Integer, DBNull.Value); - using var reader = await cmd.ExecuteReaderAsync(); - - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - } - - // Resolve type by ClrType (type inference) - conn.ReloadTypes(); - using (var cmd = new 
NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = new int[0] }); - using var reader = await cmd.ExecuteReaderAsync(); - - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - } - - // Resolve type by DataTypeName - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName="p", DataTypeName = "integer[]", Value = DBNull.Value }); - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - } - } - - // Resolve type by OID (read) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT '{1, 3}'::INTEGER[]", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[] { 1, 3 })); - } - } + static readonly TestCaseData[] ArrayTestCases = + [ + new TestCaseData(new[] { 1, 2, 3 }, "{1,2,3}", "integer[]") + .SetName("Integer_array"), + new TestCaseData(Array.Empty(), "{}", "integer[]") + .SetName("Empty_array"), + new TestCaseData(new[,] { { 1, 2, 3 }, { 7, 8, 9 } }, "{{1,2,3},{7,8,9}}", "integer[]") + .SetName("Two_dimensional_array"), + new TestCaseData( + new[,] { { "a", "bb", "ccc" }, { "dddd", "eeeee", "ffffff" } }, + """{{a,bb,ccc},{dddd,eeeee,ffffff}}""", + "text[]") + .SetName("Two_dimensional_variable_size_array"), + new TestCaseData(new[] { [1, 2], new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]") + .SetName("Bytea_array") + ]; + + [Test, TestCaseSource(nameof(ArrayTestCases))] + public Task Arrays(T array, string sqlLiteral, string dataTypeName) + => AssertType(array, sqlLiteral, dataTypeName); [Test] - public async Task Bind_int_then_array_of_int() - { - using var pool = CreateTempPool(ConnectionString, out var connString); - using var conn = new 
NpgsqlConnection(connString); - await conn.OpenAsync(); - - using var cmd = new NpgsqlCommand("SELECT 1", conn); - _ = await cmd.ExecuteScalarAsync(); - - cmd.CommandText = "SELECT ARRAY[1,2]"; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new[] { 1, 2 })); - } - - [Test, Description("Roundtrips a simple, one-dimensional array of ints")] - public async Task Ints() + public async Task NullableInts() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); - - var expected = new[] { 1, 5, 9 }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - var p3 = new NpgsqlParameter("p3", expected); - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - cmd.Parameters.Add(p3); - p1.Value = expected; - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - for (var i = 0; i < cmd.Parameters.Count; i++) + var connectionStringBuilder = new NpgsqlConnectionStringBuilder(ConnectionString) { - Assert.That(reader.GetValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetValue(i), Is.TypeOf()); - Assert.That(reader.GetProviderSpecificValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(Array))); - Assert.That(reader.GetProviderSpecificFieldType(i), Is.EqualTo(typeof(Array))); - } - } - - [Test, Description("Roundtrips a simple, one-dimensional array of int? 
values")] - public async Task Nullable_ints() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); - - var expected = new int?[] { 1, 5, null, 9 }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - var p3 = new NpgsqlParameter("p3", expected); - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - cmd.Parameters.Add(p3); - p1.Value = expected; - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); + ArrayNullabilityMode = ArrayNullabilityMode.Always + }; + var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionStringBuilder.ToString()); + await using var dataSource = dataSourceBuilder.Build(); - for (var i = 0; i < cmd.Parameters.Count; i++) - { - Assert.That(reader.GetFieldValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue>(i), Is.EqualTo(expected.ToList())); - Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(Array))); - Assert.That(reader.GetProviderSpecificFieldType(i), Is.EqualTo(typeof(Array))); - } + await AssertType(dataSource, new int?[] { 1, 2, null, 3 }, "{1,2,NULL,3}", "integer[]"); } - [Test, Description("Checks that PG arrays containing nulls can't be read as CLR arrays of non-nullable value types.")] + [Test, Description("Checks that PG arrays containing nulls can't be read as CLR arrays of non-nullable value types (the default).")] public async Task Nullable_ints_cannot_be_read_as_non_nullable() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT '{1, NULL, 2}'::integer[]", conn); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); + => await AssertTypeUnsupportedRead("{1,NULL,2}", "int[]"); - Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); - Assert.That(() => reader.GetFieldValue>(0), Throws.Exception.TypeOf()); - Assert.That(() => reader.GetValue(0), 
Throws.Exception.TypeOf()); + [Test] + public async Task Throws_too_many_dimensions() + { + await using var conn = CreateConnection(); + await conn.OpenAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + cmd.Parameters.AddWithValue("p", new int[1, 1, 1, 1, 1, 1, 1, 1, 1]); // 9 dimensions + Assert.That( + () => cmd.ExecuteScalarAsync(), + Throws.Exception.TypeOf().With.Message.EqualTo("Postgres arrays can have at most 8 dimensions. (Parameter 'dimensionLengths')")); } [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] @@ -165,12 +79,14 @@ public async Task Nullable_ints_cannot_be_read_as_non_nullable() [TestCase(ArrayNullabilityMode.PerInstance)] public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) { - using var pool = CreateTempPool(new NpgsqlConnectionStringBuilder(ConnectionString){ ArrayNullabilityMode = mode}, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); - await using var cmd = new NpgsqlCommand("SELECT onedim, twodim FROM (VALUES" + - "('{1, 2, 3, 4}'::int[],'{{1, 2},{3, 4}}'::int[][])," + - "('{5, NULL, 6, 7}'::int[],'{{5, NULL},{6, 7}}'::int[][])" + - ") AS x(onedim,twodim)", conn); + await using var dataSource = CreateDataSource(csb => csb.ArrayNullabilityMode = mode); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( +""" +SELECT onedim, twodim FROM (VALUES +('{1, 2, 3, 4}'::int[],'{{1, 2},{3, 4}}'::int[][]), +('{5, NULL, 6, 7}'::int[],'{{5, NULL},{6, 7}}'::int[][])) AS x(onedim,twodim) +""", conn); await using var reader = await cmd.ExecuteReaderAsync(); switch (mode) @@ -186,9 +102,9 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) Assert.That(reader.GetValue(1), Is.EqualTo(new [,]{{1, 2}, {3, 4}})); reader.Read(); Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); - Assert.That(() => reader.GetValue(0), 
Throws.Exception.TypeOf()); + Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); - Assert.That(() => reader.GetValue(1), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetValue(1), Throws.Exception.TypeOf()); break; case ArrayNullabilityMode.Always: reader.Read(); @@ -231,57 +147,165 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) Assert.That(value, Is.EqualTo(new int?[,]{{5, null},{6, 7}})); break; default: - throw new ArgumentOutOfRangeException(nameof(mode), mode, null); + throw new UnreachableException($"Unknown case {mode}"); } } - [Test] - public async Task Empty_array() + [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] + [TestCase(ArrayNullabilityMode.Always)] + [TestCase(ArrayNullabilityMode.Never)] + [TestCase(ArrayNullabilityMode.PerInstance)] + public async Task Value_type_array_nullabilities_type_info_provider(ArrayNullabilityMode mode) { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = new int[0] }); - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); + await using var dataSource = CreateDataSource(csb => + { + csb.ArrayNullabilityMode = mode; + csb.Timezone = "Europe/Berlin"; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( +""" +SELECT onedim, twodim FROM (VALUES +('{"1998-04-12 15:26:38+02"}'::timestamptz[],'{{"1998-04-12 15:26:38+02"},{"1998-04-13 15:26:38+02"}}'::timestamptz[][]), +('{"1998-04-14 15:26:38+02", NULL}'::timestamptz[],'{{"1998-04-14 15:26:38+02", NULL},{"1998-04-15 15:26:38+02", "1998-04-16 15:26:38+02"}}'::timestamptz[][])) AS x(onedim,twodim) +""", conn); + await using var reader = await cmd.ExecuteReaderAsync(); - 
Assert.That(reader.GetFieldValue(0), Is.SameAs(Array.Empty())); - Assert.That(reader.GetFieldValue(0), Is.SameAs(Array.Empty())); + switch (mode) + { + case ArrayNullabilityMode.Never: + reader.Read(); + var value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[]))); + Assert.That(value, Is.EqualTo(new []{new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc)})); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[,]))); + Assert.That(value, Is.EqualTo(new [,] + { + { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) }, + { new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc) } + })); + reader.Read(); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(() => reader.GetValue(1), Throws.Exception.TypeOf()); + break; + case ArrayNullabilityMode.Always: + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[]))); + Assert.That(value, Is.EqualTo(new DateTime?[]{new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc)})); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[,]))); + Assert.That(value, Is.EqualTo(new DateTime?[,] + { + { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) }, + { new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc) } + })); + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[]))); + Assert.That(value, Is.EqualTo(new DateTime?[]{ new 
DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null })); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[,]))); + Assert.That(value, Is.EqualTo(new DateTime?[,] + { + { new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null }, + { new DateTime(1998, 4, 15, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 16, 13, 26, 38, DateTimeKind.Utc) } + })); + break; + case ArrayNullabilityMode.PerInstance: + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[]))); + Assert.That(value, Is.EqualTo(new []{new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc)})); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[,]))); + Assert.That(value, Is.EqualTo(new [,] + { + { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) }, + { new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc) } + })); + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[]))); + Assert.That(value, Is.EqualTo(new DateTime?[]{ new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null })); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[,]))); + Assert.That(value, Is.EqualTo(new DateTime?[,] + { + { new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null }, + { new DateTime(1998, 4, 15, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 16, 13, 26, 38, DateTimeKind.Utc) } + })); + break; + default: + throw new UnreachableException($"Unknown case {mode}"); + } } - [Test, Description("Roundtrips an empty multi-dimensional array.")] - 
public async Task Empty_multidimensional_array() + // Note that PG normalizes empty multidimensional arrays to single-dimensional, e.g. ARRAY[[], []]::integer[] returns {}. + [Test] + public async Task Write_empty_multidimensional_array() + => await AssertTypeWrite(new int[0, 0], "{}", "integer[]"); + + [Test] + public async Task Generic_List() + => await AssertType( + new List { 1, 2, 3 }, "{1,2,3}", "integer[]", valueTypeEqualsFieldType: false); + + [Test] + public async Task Write_IList_implementation() + => await AssertTypeWrite( + ImmutableArray.Create(1, 2, 3), "{1,2,3}", "integer[]"); + + [Test] + public void Read_IList_implementation_throws() + => Assert.ThrowsAsync(() => + AssertTypeRead("{1,2,3}", "integer[]", ImmutableArray.Create(1, 2, 3), valueTypeEqualsFieldType: false)); + + [Test] + public async Task Generic_IList() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); - var expected = new int[0, 0]; - cmd.Parameters.AddWithValue("p", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); + var expected = ImmutableArray.Create(1,2,3); + cmd.Parameters.Add(new NpgsqlParameter>("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer) { TypedValue = expected }); var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); + Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); } [Test, Description("Verifies that an InvalidOperationException is thrown when the returned array has a different number of dimensions from what was requested.")] public async Task Wrong_array_dimensions_throws() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT ARRAY[[1], [2]]", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT ARRAY[[1], [2]]", 
conn); var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - var ex = Assert.Throws(() => reader.GetFieldValue(0))!; - Assert.That(ex.Message, Is.EqualTo("Cannot read an array with 1 dimension(s) from an array with 2 dimension(s)")); + var ex = Assert.Throws(() => reader.GetFieldValue(0))!; + Assert.That(ex.Message, Does.StartWith("Cannot read an array value with 2 dimensions into a collection type with 1 dimension")); } [Test, Description("Verifies that an attempt to read an Array of value types that contains null values as array of a non-nullable type fails.")] public async Task Read_null_as_non_nullable_array_throws() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); var expected = new int?[] { 1, 5, null, 9 }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); @@ -291,16 +315,16 @@ public async Task Read_null_as_non_nullable_array_throws() Assert.That( () => reader.GetFieldValue(0), - Throws.Exception.TypeOf() - .With.Message.EqualTo(ArrayHandler.ReadNonNullableCollectionWithNullsExceptionMessage)); + Throws.Exception.TypeOf() + .With.Message.EqualTo(ArrayConverterCore.ReadNonNullableCollectionWithNullsExceptionMessage)); } [Test, Description("Verifies that an attempt to read an Array of value types that contains null values as List of a non-nullable type fails.")] public async Task Read_null_as_non_nullable_list_throws() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); var expected = new int?[] { 1, 5, null, 9 }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); @@ -310,24 +334,24 @@ public async Task 
Read_null_as_non_nullable_list_throws() Assert.That( () => reader.GetFieldValue>(0), - Throws.Exception.TypeOf() - .With.Message.EqualTo(ArrayHandler.ReadNonNullableCollectionWithNullsExceptionMessage)); + Throws.Exception.TypeOf() + .With.Message.EqualTo(ArrayConverterCore.ReadNonNullableCollectionWithNullsExceptionMessage)); } [Test, Description("Roundtrips a large, one-dimensional array of ints that will be chunked")] public async Task Long_one_dimensional() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var expected = new int[conn.Settings.WriteBufferSize/4 + 100]; for (var i = 0; i < expected.Length; i++) expected[i] = i; - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var p = new NpgsqlParameter {ParameterName = "p", Value = expected}; cmd.Parameters.Add(p); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); reader.Read(); Assert.That(reader[0], Is.EqualTo(expected)); } @@ -335,228 +359,83 @@ public async Task Long_one_dimensional() [Test, Description("Roundtrips a large, two-dimensional array of ints that will be chunked")] public async Task Long_two_dimensional() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var len = conn.Settings.WriteBufferSize/2 + 100; var expected = new int[2, len]; for (var i = 0; i < len; i++) expected[0, i] = i; for (var i = 0; i < len; i++) expected[1, i] = i; - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var p = new NpgsqlParameter {ParameterName = "p", Value = expected}; cmd.Parameters.Add(p); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await using var reader = await 
cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); reader.Read(); Assert.That(reader[0], Is.EqualTo(expected)); } + [Test, Description("Reads an one-dimensional array with lower bound != 0")] + public Task Read_non_zero_lower_bounded() + => AssertTypeRead("[2:3]={ 8, 9 }", "integer[]", new[] { 8, 9 }); + + [Test, Description("Reads an one-dimensional array with lower bound != 0")] + public Task Read_non_zero_lower_bounded_multidimensional() + => AssertTypeRead("[2:3][2:3]={ {8,9}, {1,2} }", "integer[]", new[,] { { 8, 9 }, { 1, 2 }}); + [Test, Description("Roundtrips a long, one-dimensional array of strings, including a null")] public async Task Strings_with_null() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var largeString = new StringBuilder(); largeString.Append('a', conn.Settings.WriteBufferSize); var expected = new[] {"value1", null, largeString.ToString(), "val3"}; - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Text) {Value = expected}; cmd.Parameters.Add(p); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); reader.Read(); Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); } - [Test, Description("Roundtrips a zero-dimensional array of ints, should return empty one-dimensional")] - public async Task Zero_dimensional() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = new int[0]; - var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = expected }; - cmd.Parameters.Add(p); - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - 
Assert.That(reader.GetProviderSpecificValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - cmd.Dispose(); - } - - [Test, Description("Roundtrips a two-dimensional array of ints")] - public async Task Two_dimensional_ints() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - var expected = new[,] { { 1, 2, 3 }, { 7, 8, 9 } }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - p1.Value = expected; - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetProviderSpecificValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - } - - [Test, Description("Reads an one-dimensional array with lower bound != 0")] - public async Task Read_non_zero_lower_bounded() - { - using var conn = await OpenConnectionAsync(); - using (var cmd = new NpgsqlCommand("SELECT '[2:3]={ 8, 9 }'::INT[]", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[] {8, 9})); - } - - using (var cmd = new NpgsqlCommand("SELECT '[2:3][2:3]={ {8,9}, {1,2} }'::INT[][]", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[,] {{8, 9}, {1, 2}})); - } - } - - [Test, Description("Roundtrips a one-dimensional array of bytea values")] - public async Task Array_of_byte_arrays() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - var expected = new[] { new byte[] { 1, 2 }, new byte[] { 3, 4, } }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Bytea); - var p2 = new 
NpgsqlParameter { ParameterName = "p2", Value = expected }; - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - p1.Value = expected; - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); - Assert.That(reader.GetProviderSpecificFieldType(0), Is.EqualTo(typeof(Array))); - } - - - [Test, Description("Roundtrips a non-generic IList as an array")] - // ReSharper disable once InconsistentNaming - public async Task IList_non_generic() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = new ArrayList(new[] { 1, 2, 3 }); - var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = expected }; - cmd.Parameters.Add(p); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(expected.ToArray())); - } - - [Test, Description("Roundtrips a generic List as an array")] - // ReSharper disable once InconsistentNaming - public async Task IList_generic() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - var expected = new[] { 1, 2, 3 }.ToList(); - var p1 = new NpgsqlParameter { ParameterName = "p1", Value = expected }; - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue>(1), Is.EqualTo(expected)); - } - - [Test, Description("Tests for failure when reading a generic IList from a multidimensional array")] - // ReSharper disable once InconsistentNaming - public async Task IList_generic_fails_for_multidimensional_array() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new 
NpgsqlCommand("SELECT @p1", conn); - var expected = new[,] { { 1, 2 }, { 3, 4 } }; - var p1 = new NpgsqlParameter { ParameterName = "p1", Value = expected }; - cmd.Parameters.Add(p1); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - var exception = Assert.Throws(() => - { - reader.GetFieldValue>(0); - })!; - Assert.That(exception.Message, Is.EqualTo("Can't read multidimensional array as List")); - } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/844")] - public async Task IEnumerable_throws_friendly_exception() + public async Task Writing_IEnumerable_is_not_supported() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", Enumerable.Range(1, 3)); - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Message.Contains("array or List")); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); + cmd.Parameters.AddWithValue("p1", new EnumerableOnly()); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Property("InnerException").Message.Contains("array or some implementation of IList")); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/960")] - public async Task Mixed_element_types() + class EnumerableOnly : IEnumerable { - var mixedList = new ArrayList { 1, "yo" }; - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, mixedList); - Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception - .TypeOf() - .With.Message.Contains("mix")); + public IEnumerator GetEnumerator() => throw new NotImplementedException(); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); } [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/960")] public async Task Jagged_arrays_not_supported() { - var jagged = new int[2][]; - jagged[0] = new[] { 8 }; - jagged[1] = new[] { 8, 10 }; - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, jagged); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, new[] { [8], new[] { 8, 10 } }); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception - .TypeOf() - .With.Message.Contains("jagged")); - } - - [Test, Description("Checks that ILists are properly serialized as arrays of their underlying types")] - public async Task List_type_resolution() - { - using var conn = await OpenConnectionAsync(ConnectionString); - await AssertIListRoundtrips(conn, new[] { 1, 2, 3 }); - await AssertIListRoundtrips(conn, new IntList { 1, 2, 3 }); - await AssertIListRoundtrips(conn, new MisleadingIntList() { 1, 2, 3 }); - } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1546")] - public void Generic_List_get_NpgsqlDbType() - { - var p = new NpgsqlParameter - { - ParameterName = "p1", - Value = new List { 1, 2, 3 } - }; - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Array | NpgsqlDbType.Integer)); + .TypeOf() + .With.Property("InnerException").Message.Contains("jagged")); } [Test, Description("Roundtrips one-dimensional and two-dimensional arrays of a PostgreSQL domain.")] public async Task Array_of_domain() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - using var conn = await OpenConnectionAsync(); - TestUtil.MinimumPgVersion(conn, "11.0", "Arrays of domains were introduced in PostgreSQL 11"); - conn.ExecuteNonQuery("CREATE DOMAIN pg_temp.posint AS integer CHECK (VALUE > 0);"); - conn.ReloadTypes(); - 
using var cmd = new NpgsqlCommand("SELECT @p1::posint[], @p2::posint[][]", conn); + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "11.0", "Arrays of domains were introduced in PostgreSQL 11"); + await conn.ExecuteNonQueryAsync("CREATE DOMAIN pg_temp.posint AS integer CHECK (VALUE > 0);"); + await conn.ReloadTypesAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1::posint[], @p2::posint[][]", conn); var oneDim = new[] { 1, 3, 5, 9 }; var twoDim = new[,] { { 1, 3 }, { 5, 9 } }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Integer | NpgsqlDbType.Array, oneDim); cmd.Parameters.AddWithValue("p2", NpgsqlDbType.Integer | NpgsqlDbType.Array, twoDim); - using var reader = cmd.ExecuteReader(); + await using var reader = cmd.ExecuteReader(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(oneDim)); @@ -576,20 +455,20 @@ public async Task Array_of_domain() [Test, Description("Roundtrips a PostgreSQL domain over a one-dimensional and a two-dimensional array.")] public async Task Domain_of_array() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - using var conn = await OpenConnectionAsync(); - TestUtil.MinimumPgVersion(conn, "11.0", "Domains over arrays were introduced in PostgreSQL 11"); - conn.ExecuteNonQuery("CREATE DOMAIN pg_temp.int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 4);" + - "CREATE DOMAIN pg_temp.int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 2);"); - conn.ReloadTypes(); - using var cmd = new NpgsqlCommand("SELECT @p1::int_array_1d, @p2::int_array_2d", conn); + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "11.0", "Domains over arrays were introduced in PostgreSQL 11"); + await conn.ExecuteNonQueryAsync( +""" +CREATE DOMAIN pg_temp.int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 4); +CREATE DOMAIN pg_temp.int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 2); +"""); + await conn.ReloadTypesAsync(); + await using var cmd = new 
NpgsqlCommand("SELECT @p1::int_array_1d, @p2::int_array_2d", conn); var oneDim = new[] { 1, 3, 5, 9 }; var twoDim = new[,] { { 1, 3 }, { 5, 9 } }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Integer | NpgsqlDbType.Array, oneDim); cmd.Parameters.AddWithValue("p2", NpgsqlDbType.Integer | NpgsqlDbType.Array, twoDim); - using var reader = cmd.ExecuteReader(); + await using var reader = cmd.ExecuteReader(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(oneDim)); @@ -608,28 +487,42 @@ public async Task Domain_of_array() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3417")] public async Task Read_two_empty_arrays() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT '{}'::INT[], '{}'::INT[]", conn); - using var reader = await cmd.ExecuteReaderAsync(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT '{}'::INT[], '{}'::INT[]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - Assert.AreSame(reader.GetFieldValue(0), reader.GetFieldValue(1)); + Assert.That(reader.GetFieldValue(1), Is.SameAs(reader.GetFieldValue(0))); // Unlike T[], List is mutable so we should not return the same instance - Assert.AreNotSame(reader.GetFieldValue>(0), reader.GetFieldValue>(1)); + Assert.That(reader.GetFieldValue>(1), Is.Not.SameAs(reader.GetFieldValue>(0))); } - async Task AssertIListRoundtrips(NpgsqlConnection conn, IEnumerable value) + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1271")] + public async Task Generics_read_empty_multidim_array() { - using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = value }); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("select ARRAY[[], []]::integer[]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + 
Assert.That(reader.GetFieldValue(0).Length, Is.Zero); + } - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - Assert.That(reader[0], Is.EqualTo(value.ToArray())); + [Test] + public async Task Arrays_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertTypeUnsupportedRead("{1,2,3}", "integer[]", dataSource); + await AssertTypeUnsupportedWrite([1, 2, 3], "integer[]", dataSource); } - class IntList : List { } - class MisleadingIntList : List { } + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableArrays() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + await using var dataSource = dataSourceBuilder.Build(); - public ArrayTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} + await AssertType(dataSource, new[] { 1, 2, 3 }, "{1,2,3}", "integer[]"); + } } diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index ac02dab77e..0ef6481ffd 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -1,6 +1,7 @@ -using System; +using System; using System.Collections; using System.Collections.Specialized; +using System.Data; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -13,7 +14,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-bit.html /// -public class BitStringTests : MultiplexingTestBase +public class BitStringTests : TestBase { [Test] [TestCase("10110110", TestName = "BitArray")] @@ -27,10 +28,10 @@ public async Task BitArray(string sqlLiteral) for (var i = 0; i < sqlLiteral.Length; i++) bitArray[i] = sqlLiteral[i] == '1'; - await AssertType(bitArray, sqlLiteral, "bit 
varying", NpgsqlDbType.Varbit); + await AssertType(bitArray, sqlLiteral, "bit varying"); if (len > 0) - await AssertType(bitArray, sqlLiteral, $"bit({len})", NpgsqlDbType.Bit, isDefaultForWriting: false); + await AssertType(bitArray, sqlLiteral, $"bit({len})", dataTypeInference: DataTypeInference.Mismatch); } [Test] @@ -47,21 +48,21 @@ public async Task BitArray_long() [Test] public Task BitVector32() => AssertType( - new BitVector32(4), "00000000000000000000000000000100", "bit varying", NpgsqlDbType.Varbit, isDefaultForReading: false); + new BitVector32(4), "00000000000000000000000000000100", "bit varying", valueTypeEqualsFieldType: false); [Test] public Task BitVector32_too_long() - => AssertTypeUnsupportedRead(new string('0', 34), "bit varying"); + => AssertTypeUnsupportedRead(new string('0', 34), "bit varying"); [Test] public Task Bool() - => AssertType(true, "1", "bit(1)", NpgsqlDbType.Bit, isDefault: false); + => AssertType(true, "1", "bit(1)", dataTypeInference: DataTypeInference.Mismatch, dbType: new(DbType.Object, DbType.Boolean)); [Test] public async Task Bitstring_with_multiple_bits_as_bool_throws() { - await AssertTypeUnsupportedRead("01", "varbit"); - await AssertTypeUnsupportedRead("01", "bit(2)"); + await AssertTypeUnsupportedRead("01", "varbit"); + await AssertTypeUnsupportedRead("01", "bit(2)"); } [Test] @@ -69,7 +70,7 @@ public async Task Array() { using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = new[] { new BitArray(new[] { true, false, true }), new BitArray(new[] { false }) }; + var expected = new[] { new BitArray([true, false, true]), new BitArray([false]) }; var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Varbit) { Value = expected }; cmd.Parameters.Add(p); p.Value = expected; @@ -99,16 +100,30 @@ public async Task Array_of_single_bits() } [Test] - public Task Write_as_string() - => AssertTypeWrite("010101", "010101", "bit varying", NpgsqlDbType.Varbit, 
isDefault: false); + public async Task Array_of_single_bits_and_null() + { + var dataSource = CreateDataSource(builder => builder.ArrayNullabilityMode = ArrayNullabilityMode.Always); + using var conn = await dataSource.OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p::BIT(1)[]", conn); + var expected = new bool?[] { true, false, null }; + var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Bit) {Value = expected}; + cmd.Parameters.Add(p); + p.Value = expected; + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + var x = reader.GetValue(0); + Assert.That(reader.GetValue(0), Is.EqualTo(expected)); + Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + } [Test] - public Task Write_as_string_validation() - => AssertTypeUnsupportedWrite("001q0", "bit varying"); + public Task As_string() + => AssertType("010101", "010101", + "bit varying", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); [Test] - public Task Read_as_string_is_not_supported() - => AssertTypeUnsupportedRead("010101", "bit varying"); - - public BitStringTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file + public Task Write_as_string_validation() + => AssertTypeUnsupportedWrite("001q0", "bit varying"); +} diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 56f533dba4..5765848f5a 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -1,7 +1,8 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.IO; +using System.Net.Sockets; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -14,13 +15,13 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-binary.html /// 
-public class ByteaTests : MultiplexingTestBase +public class ByteaTests : TestBase { [Test] [TestCase(new byte[] { 1, 2, 3, 4, 5 }, "\\x0102030405", TestName = "Bytea")] [TestCase(new byte[] { }, "\\x", TestName = "Bytea_empty")] public Task Bytea(byte[] byteArray, string sqlLiteral) - => AssertType(byteArray, sqlLiteral, "bytea", NpgsqlDbType.Bytea, DbType.Binary); + => AssertType(byteArray, sqlLiteral, "bytea", dbType: DbType.Binary); [Test] public async Task Bytea_long() @@ -34,52 +35,58 @@ public async Task Bytea_long() await Bytea(array, sqlLiteral); } -#if !NETSTANDARD2_0 [Test] - public Task Write_as_Memory() - => AssertTypeWrite( - new Memory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); - - [Test] - public Task Read_as_Memory_not_supported() - => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", "bytea"); - - [Test] - public Task Write_as_ReadOnlyMemory() - => AssertTypeWrite( - new ReadOnlyMemory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + public Task AsMemory() + => AssertType( + new Memory([1, 2, 3]), "\\x010203", "bytea", dbType: DbType.Binary, + comparer: (left, right) => left.Span.SequenceEqual(right.Span), + valueTypeEqualsFieldType: false); [Test] - public Task Read_as_ReadOnlyMemory_not_supported() - => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", "bytea"); -#endif + public Task AsReadOnlyMemory() + => AssertType( + new ReadOnlyMemory([1, 2, 3]), "\\x010203", "bytea", dbType: DbType.Binary, + comparer: (left, right) => left.Span.SequenceEqual(right.Span), + valueTypeEqualsFieldType: false); [Test] - public Task Write_as_ArraySegment() - => AssertTypeWrite( - new ArraySegment(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); - - [Test] - public Task Read_as_ArraySegment_not_supported() - => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", 
"bytea"); + public Task AsArraySegment() + => AssertType(new ArraySegment([1, 2, 3]), "\\x010203", + "bytea", dbType: DbType.Binary, valueTypeEqualsFieldType: false); [Test] public Task Write_as_MemoryStream() => AssertTypeWrite( - () => new MemoryStream(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => new MemoryStream([1, 2, 3]), "\\x010203", "bytea", dbType: DbType.Binary); [Test] public Task Write_as_MemoryStream_truncated() { var msFactory = () => { - var ms = new MemoryStream(new byte[] { 1, 2, 3, 4 }); + var ms = new MemoryStream([1, 2, 3, 4]); ms.ReadByte(); return ms; }; - return AssertTypeWrite( - msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + return AssertTypeWrite(valueFactory: msFactory, "\\x020304", "bytea", dbType: DbType.Binary); + } + + [Test] + public Task Write_as_MemoryStream_exposableArray() + { + var msFactory = () => + { + var ms = new MemoryStream(20); + ms.WriteByte(1); + ms.WriteByte(2); + ms.WriteByte(3); + ms.WriteByte(4); + ms.Position = 1; + return ms; + }; + + return AssertTypeWrite(valueFactory: msFactory, "\\x020304", "bytea", dbType: DbType.Binary); } [Test] @@ -90,8 +97,7 @@ public async Task Write_as_MemoryStream_long() rnd.NextBytes(bytes); var expectedSql = "\\x" + ToHex(bytes); - await AssertTypeWrite( - () => new MemoryStream(bytes), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + await AssertTypeWrite(() => new MemoryStream(bytes), expectedSql, "bytea", dbType: DbType.Binary); } [Test] @@ -101,10 +107,9 @@ public async Task Write_as_FileStream() var fsList = new List(); try { - await File.WriteAllBytesAsync(filePath, new byte[] { 1, 2, 3 }); + await File.WriteAllBytesAsync(filePath, [1, 2, 3]); - await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + await AssertTypeWrite(() => FileStreamFactory(filePath, 
fsList), "\\x010203", "bytea", dbType: DbType.Binary); } finally { @@ -139,8 +144,7 @@ public async Task Write_as_FileStream_long() await File.WriteAllBytesAsync(filePath, bytes); var expectedSql = "\\x" + ToHex(bytes); - await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + await AssertTypeWrite(() => FileStreamFactory(filePath, fsList), expectedSql, "bytea", dbType: DbType.Binary); } finally { @@ -185,17 +189,19 @@ public async Task Truncate_array() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p", conn); - byte[] data = { 1, 2, 3, 4, 5, 6 }; + byte[] data = [1, 2, 3, 4, 5, 6]; var p = new NpgsqlParameter("p", data) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); + Assert.That(p.Value, Is.EqualTo(new byte[] { 1, 2, 3, 4 }), "Truncated parameter value should be persisted on the parameter per DbParameter.Size docs"); // NpgsqlParameter.Size needs to persist when value is changed - byte[] data2 = { 11, 12, 13, 14, 15, 16 }; + byte[] data2 = [11, 12, 13, 14, 15, 16]; p.Value = data2; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 11, 12, 13, 14 })); // NpgsqlParameter.Size larger than the value size should mean the value size, as well as 0 and -1 + p.Value = data2; p.Size = data2.Length + 10; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2)); p.Size = 0; @@ -207,18 +213,17 @@ public async Task Truncate_array() } [Test, Description("Tests that bytea stream values are truncated when the NpgsqlParameter's Size is set")] - [NonParallelizable] // The last check will break the connection, which can fail other unrelated queries in multiplexing public async Task Truncate_stream() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p", conn); - byte[] data = { 1, 2, 3, 4, 5, 
6 }; + byte[] data = [1, 2, 3, 4, 5, 6]; var p = new NpgsqlParameter("p", new MemoryStream(data)) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); // NpgsqlParameter.Size needs to persist when value is changed - byte[] data2 = { 11, 12, 13, 14, 15, 16 }; + byte[] data2 = [11, 12, 13, 14, 15, 16]; p.Value = new MemoryStream(data2); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 11, 12, 13, 14 })); @@ -237,13 +242,9 @@ public async Task Truncate_stream() Assert.That(() => p.Size = -2, Throws.Exception.TypeOf()); - // NpgsqlParameter.Size larger than the value size should throw - p.Size = data2.Length + 10; p.Value = new MemoryStream(data2); - var ex = Assert.ThrowsAsync(async () => await cmd.ExecuteScalarAsync())!; - Assert.That(ex.InnerException, Is.TypeOf()); - if (!IsMultiplexing) - Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + p.Size = data2.Length + 10; + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2)); } [Test] @@ -251,7 +252,7 @@ public async Task Write_as_NonSeekable_stream() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p", conn); - byte[] data = { 1, 2, 3, 4, 5, 6 }; + byte[] data = [1, 2, 3, 4, 5, 6]; var p = new NpgsqlParameter("p", new NonSeekableStream(data)) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); @@ -263,7 +264,7 @@ public async Task Write_as_NonSeekable_stream() p.Value = new NonSeekableStream(data); p.Size = 0; - Assert.ThrowsAsync(async () => await cmd.ExecuteScalarAsync()); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data)); Assert.That(conn.State, Is.EqualTo(ConnectionState.Open)); } @@ -276,19 +277,26 @@ public async Task Array_of_bytea() var inVal = new[] { bytes, bytes }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Bytea | NpgsqlDbType.Array, inVal); var retVal = 
(byte[][]?)await cmd.ExecuteScalarAsync(); - Assert.AreEqual(inVal.Length, retVal!.Length); - Assert.AreEqual(inVal[0], retVal[0]); - Assert.AreEqual(inVal[1], retVal[1]); + Assert.That(retVal!.Length, Is.EqualTo(inVal.Length)); + Assert.That(retVal[0], Is.EqualTo(inVal[0])); + Assert.That(retVal[1], Is.EqualTo(inVal[1])); } - sealed class NonSeekableStream : MemoryStream + [Test] + public async Task InvalidCastException_unknown_stream_read() { - public override bool CanSeek => false; - - public NonSeekableStream(byte[] data) : base(data) + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT :p1", conn); + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Bytea, new byte[] { 1 }); + await using var reader = await cmd.ExecuteReaderAsync(); + while (await reader.ReadAsync()) { + Assert.Throws(() => reader.GetFieldValue(0)); } } - public ByteaTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} + sealed class NonSeekableStream(byte[] data) : MemoryStream(data) + { + public override bool CanSeek => false; + } } diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs index 9e252e2d1b..732dbf83e1 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs @@ -19,36 +19,40 @@ async Task Read(T composite, Action, T> assert, string? 
schema = null { await using var dataSource = await OpenAndMapComposite(composite, schema, nameof(Read), out var name); await using var connection = await dataSource.OpenConnectionAsync(); - await using var command = new NpgsqlCommand($"SELECT ROW({composite.GetValues()})::{name}", connection); + + var literal = $"ROW({composite.GetValues()})::{name}"; + var arrayLiteral = $"ARRAY[{literal}]::{name}[]"; + await using var command = new NpgsqlCommand($"SELECT {literal}, {arrayLiteral}", connection); await using var reader = command.ExecuteReader(); await reader.ReadAsync(); assert(() => reader.GetFieldValue(0), composite); + assert(() => reader.GetFieldValue(1)[0], composite); } [Test] public Task Read_class_with_property() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_class_with_field() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_struct_with_property() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_struct_with_field() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_type_with_two_properties() => Read((execute, expected) => { var actual = execute(); - Assert.AreEqual(expected.IntValue, actual.IntValue); - Assert.AreEqual(expected.StringValue, actual.StringValue); + Assert.That(actual.IntValue, Is.EqualTo(expected.IntValue)); + Assert.That(actual.StringValue, Is.EqualTo(expected.StringValue)); }); [Test] @@ -56,13 +60,14 @@ public Task Read_type_with_two_properties_inverted() => 
Read((execute, expected) => { var actual = execute(); - Assert.AreEqual(expected.IntValue, actual.IntValue); - Assert.AreEqual(expected.StringValue, actual.StringValue); + Assert.That(actual.IntValue, Is.EqualTo(expected.IntValue)); + Assert.That(actual.StringValue, Is.EqualTo(expected.StringValue)); }); [Test] public Task Read_type_with_private_property_throws() => - Read(new TypeWithPrivateProperty(), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithPrivateProperty(), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_private_getter() => @@ -93,21 +98,24 @@ public Task Read_type_with_more_properties_than_attributes() => Read(new TypeWithMorePropertiesThanAttributes(), (execute, expected) => { var actual = execute(); - Assert.That(actual.IntValue, Is.Not.Null); + Assert.That((int?)actual.IntValue, Is.Not.Null); Assert.That(actual.StringValue, Is.Null); }); [Test] public Task Read_type_with_less_properties_than_attributes_throws() => - Read(new TypeWithLessPropertiesThanAttributes(), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithLessPropertiesThanAttributes(), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_less_parameters_than_attributes_throws() => - Read(new TypeWithLessParametersThanAttributes(TheAnswer), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithLessParametersThanAttributes(TheAnswer), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_more_parameters_than_attributes_throws() => - Read(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik), (execute, expected) => Assert.Throws(() => execute())); + Read(new 
TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_one_parameter() => diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs index 938ac9f01a..800270f7c3 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs @@ -19,51 +19,67 @@ async Task Write(T composite, Action? assert = null, str { await using var dataSource = await OpenAndMapComposite(composite, schema, nameof(Write), out var _); await using var connection = await dataSource.OpenConnectionAsync(); - await using var command = new NpgsqlCommand("SELECT (@c).*", connection); + { + await using var command = new NpgsqlCommand("SELECT (@c).*", connection); + + command.Parameters.AddWithValue("c", composite); + await using var reader = await command.ExecuteReaderAsync(); + await reader.ReadAsync(); + + if (assert is not null) + assert(reader, composite); + } + + { + await using var command = new NpgsqlCommand("SELECT (@arrayc)[1].*", connection); + + command.Parameters.AddWithValue("arrayc", new[] { composite }); + await using var reader = await command.ExecuteReaderAsync(); + await reader.ReadAsync(); - command.Parameters.AddWithValue("c", composite); - await using var reader = await command.ExecuteReaderAsync(); - await reader.ReadAsync(); - if (assert is not null) - assert(reader, composite); + if (assert is not null) + assert(reader, composite); + } } [Test] public Task Write_class_with_property() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_class_with_field() - => Write((reader, expected) => Assert.AreEqual(expected.Value, 
reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_struct_with_property() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_struct_with_field() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_type_with_two_properties() => Write((reader, expected) => { - Assert.AreEqual(expected.IntValue, reader.GetInt32(0)); - Assert.AreEqual(expected.StringValue, reader.GetString(1)); + Assert.That(reader.GetInt32(0), Is.EqualTo(expected.IntValue)); + Assert.That(reader.GetString(1), Is.EqualTo(expected.StringValue)); }); [Test] public Task Write_type_with_two_properties_inverted() => Write((reader, expected) => { - Assert.AreEqual(expected.IntValue, reader.GetInt32(1)); - Assert.AreEqual(expected.StringValue, reader.GetString(0)); + Assert.That(reader.GetInt32(1), Is.EqualTo(expected.IntValue)); + Assert.That(reader.GetString(0), Is.EqualTo(expected.StringValue)); }); [Test] public void Write_type_with_private_property_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithPrivateProperty())); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithPrivateProperty())); [Test] public void Write_type_with_private_getter_throws() @@ -95,13 +111,19 @@ public Task Write_type_with_more_properties_than_attributes() [Test] public void Write_type_with_less_properties_than_attributes_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithLessPropertiesThanAttributes())); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new 
TypeWithLessPropertiesThanAttributes())); [Test] public void Write_type_with_less_parameters_than_attributes_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik))); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik))); [Test] public void Write_type_with_more_parameters_than_attributes_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithLessParametersThanAttributes(TheAnswer))); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithLessParametersThanAttributes(TheAnswer))); } diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.cs index 1df95980a3..cc84efd094 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.cs @@ -155,10 +155,9 @@ public class TypeWithExplicitPropertyName : SimpleComposite protected override string GetValue() => MyValue; } - public class TypeWithExplicitParameterName : SimpleComposite + public class TypeWithExplicitParameterName([PgName("value")] string myValue) : SimpleComposite { - public TypeWithExplicitParameterName([PgName("value")] string myValue) => Value = myValue; - public string Value { get; } + public string Value { get; } = myValue; protected override string GetValue() => Value; } @@ -178,81 +177,72 @@ public class TypeWithLessPropertiesThanAttributes : IComposite public int IntValue { get; set; } } - public class TypeWithMoreParametersThanAttributes : IComposite + public class TypeWithMoreParametersThanAttributes(int intValue, string? stringValue) : IComposite { public string GetAttributes() => "int_value integer"; public string GetValues() => $"{IntValue}"; - public TypeWithMoreParametersThanAttributes(int intValue, string? 
stringValue) - { - IntValue = intValue; - StringValue = stringValue; - } - - public int IntValue { get; set; } - public string? StringValue { get; set; } + public int IntValue { get; set; } = intValue; + public string? StringValue { get; set; } = stringValue; } - public class TypeWithLessParametersThanAttributes : IComposite + public class TypeWithLessParametersThanAttributes(int intValue) : IComposite { public string GetAttributes() => "int_value integer, string_value text"; public string GetValues() => $"{IntValue}, NULL"; - public TypeWithLessParametersThanAttributes(int intValue) => - IntValue = intValue; - - public int IntValue { get; } + public int IntValue { get; } = intValue; } - public class TypeWithOneParameter : IComposite + public class TypeWithOneParameter(int value1) : IComposite { public string GetAttributes() => "value1 integer"; public string GetValues() => $"{Value1}"; - public TypeWithOneParameter(int value1) => Value1 = value1; - public int Value1 { get; } + public int Value1 { get; } = value1; } - public class TypeWithTwoParameters : IComposite + public class TypeWithTwoParameters(int intValue, string stringValue) : IComposite { public string GetAttributes() => "int_value integer, string_value text"; public string GetValues() => $"{IntValue}, '{StringValue}'"; - public TypeWithTwoParameters(int intValue, string stringValue) => - (IntValue, StringValue) = (intValue, stringValue); - - public int IntValue { get; } - public string? StringValue { get; } + public int IntValue { get; } = intValue; + public string? 
StringValue { get; } = stringValue; } - public class TypeWithTwoParametersReversed : IComposite + public class TypeWithTwoParametersReversed(string stringValue, int intValue) : IComposite { public string GetAttributes() => "int_value integer, string_value text"; public string GetValues() => $"{IntValue}, '{StringValue}'"; - public TypeWithTwoParametersReversed(string stringValue, int intValue) => - (StringValue, IntValue) = (stringValue, intValue); - - public int IntValue { get; } - public string? StringValue { get; } + public int IntValue { get; } = intValue; + public string? StringValue { get; } = stringValue; } - public class TypeWithNineParameters : IComposite + public class TypeWithNineParameters( + int value1, + int value2, + int value3, + int value4, + int value5, + int value6, + int value7, + int value8, + int value9) + : IComposite { public string GetAttributes() => "value1 integer, value2 integer, value3 integer, value4 integer, value5 integer, value6 integer, value7 integer, value8 integer, value9 integer"; public string GetValues() => $"{Value1}, {Value2}, {Value3}, {Value4}, {Value5}, {Value6}, {Value7}, {Value8}, {Value9}"; - public TypeWithNineParameters(int value1, int value2, int value3, int value4, int value5, int value6, int value7, int value8, int value9) - => (Value1, Value2, Value3, Value4, Value5, Value6, Value7, Value8, Value9) = (value1, value2, value3, value4, value5, value6, value7, value8, value9); - - public int Value1 { get; } - public int Value2 { get; } - public int Value3 { get; } - public int Value4 { get; } - public int Value5 { get; } - public int Value6 { get; } - public int Value7 { get; } - public int Value8 { get; } - public int Value9 { get; } + public int Value1 { get; } = value1; + public int Value2 { get; } = value2; + public int Value3 { get; } = value3; + public int Value4 { get; } = value4; + public int Value5 { get; } = value5; + public int Value6 { get; } = value6; + public int Value7 { get; } = value7; + public int 
Value8 { get; } = value8; + public int Value9 { get; } = value9; } } diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 75bf214fff..4c6ebff7eb 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -1,5 +1,7 @@ -using System; +using System; using System.Linq; +using System.Net; +using System.Reflection; using System.Threading.Tasks; using Npgsql.PostgresTypes; using NpgsqlTypes; @@ -8,7 +10,7 @@ namespace Npgsql.Tests.Types; -public class CompositeTests : MultiplexingTestBase +public class CompositeTests : TestBase { [Test] public async Task Basic() @@ -28,7 +30,57 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task Basic_with_custom_default_translator() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int, s text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.DefaultNameTranslator = new CustomTranslator(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeComposite { SomeText = "foo", X = 8 }, + "(8,foo)", + type, + dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task Basic_with_custom_translator() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int, s text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type, new CustomTranslator()); + await using var dataSource = 
dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeComposite { SomeText = "foo", X = 8 }, + "(8,foo)", + type, + dataTypeInference: DataTypeInference.Nothing); + } + + class CustomTranslator : INpgsqlNameTranslator + { + public string TranslateTypeName(string clrName) => throw new NotImplementedException(); + + public string TranslateMemberName(string clrName) => clrName[0].ToString().ToLowerInvariant(); } #pragma warning disable CS0618 // GlobalTypeMapper is obsolete @@ -54,7 +106,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); } finally { @@ -87,7 +139,7 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", containerType, - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1168")] @@ -108,7 +160,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", $"{schema}.some_composite", - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4365")] @@ -138,15 +190,36 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", $"{secondSchemaName}.container", - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); await AssertType( connection, new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", $"{firstSchemaName}.container", - npgsqlDbType: null, - isDefaultForWriting: false); + dataTypeInference: DataTypeInference.Nothing); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5972")] + public async Task With_schema_and_dots_in_type_name() + { + await using var adminConnection 
= await OpenConnectionAsync(); + var schema = await CreateTempSchema(adminConnection); + var typename = "Some.Composite.with.dots"; + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {schema}.\"{typename}\" AS (x int, some_text text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite($"{schema}.{typename}"); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeComposite { SomeText = "foobar", X = 10 }, + "(10,foobar)", + $"{schema}.\"{typename}\"", + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -167,7 +240,7 @@ await AssertType( new SomeCompositeStruct { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -188,7 +261,7 @@ await AssertType( new SomeComposite[] { new() { SomeText = "foo", X = 8 }, new() { SomeText = "bar", X = 9 }}, @"{""(8,foo)"",""(9,bar)""}", type + "[]", - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -210,7 +283,7 @@ await AssertType( new NameTranslationComposite { Simple = 2, TwoWords = 3, SomeClrName = 4 }, "(2,3,4)", type, - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/856")] @@ -234,7 +307,189 @@ await AssertType( new Address { PostalCode = "12345", Street = "Main St." 
}, @"(""Main St."",12345)", compositeType, - npgsqlDbType: null); + dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task Composite_containing_array_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (ints int4[])"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithArray { Ints = [1, 2, 3, 4] }, + @"(""{1,2,3,4}"")", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.Ints!.SequenceEqual(expected.Ints!)); + } + + [Test] + public async Task Composite_containing_enum_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var enumType = await GetTempTypeName(adminConnection); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {enumType} AS enum ('value1', 'value2', 'value3'); +CREATE TYPE {compositeType} AS (enum_value {enumType});"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + dataSourceBuilder.MapEnum(enumType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithEnum { EnumValue = SomeCompositeWithEnum.TestEnum.Value2 }, + @"(value2)", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.EnumValue == expected.EnumValue); + } + + [Test] + public async Task Composite_containing_IPAddress() + { + await using var adminConnection = await 
OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (address inet)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithIPAddress { Address = IPAddress.Loopback }, + @"(127.0.0.1)", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.Address!.Equals(expected.Address)); + } + + [Test] + public async Task Composite_containing_type_info_provider_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (date_times timestamp[])"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithTypeInfoProviderType { DateTimes = [new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified), new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified).AddDays(1) + ] + }, + """("{""1970-01-01 00:00:00"",""1970-01-02 00:00:00""}")""", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!)); + } + + [Test] + public async Task Composite_containing_type_info_provider_type_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + 
await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (date_times timestamp[])"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + Assert.ThrowsAsync(() => AssertType( + connection, + new SomeCompositeWithTypeInfoProviderType { DateTimes = [DateTime.UnixEpoch] }, // UTC DateTime + """("{""1970-01-01 01:00:00"",""1970-01-02 01:00:00""}")""", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!))); + } + + // A composite whose only provider-backed field has a fixed-size default concrete (plain timestamp, + // 8 bytes). Exercises the path where the composite's combined write size is exact but gets clamped + // externally because a field defers to a provider: GetSize fires for bind-time resolution, observes + // that no field produced write state, skips the WriteState allocation, and Write proceeds to call the relevant converter. 
+ [Test] + public async Task Composite_containing_fixed_size_type_info_provider_field() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (id int, created_at timestamp)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithFixedSizeTypeInfoProviderField + { + Id = 42, + CreatedAt = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Unspecified) + }, + """(42,"1970-01-01 00:00:00")""", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.Id == expected.Id && actual.CreatedAt == expected.CreatedAt); + } + + // Companion to the above — confirms that deterministic provider-level errors (DateTime kind + // mismatch against plain timestamp) still surface when the field is fixed-size, now via the + // bind-time GetSize checkpoint instead of the first Write. 
+ [Test] + public async Task Composite_containing_fixed_size_type_info_provider_field_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (id int, created_at timestamp)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + Assert.ThrowsAsync(() => AssertType( + connection, + new SomeCompositeWithFixedSizeTypeInfoProviderField + { + Id = 42, + CreatedAt = DateTime.UnixEpoch // UTC — incompatible with plain timestamp + }, + """(42,"1970-01-01 00:00:00")""", + compositeType, + dataTypeInference: DataTypeInference.Nothing)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/990")] @@ -245,8 +500,7 @@ public async Task Table_as_composite([Values] bool enabled) var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(table); - if (enabled) - dataSourceBuilder.ConnectionStringBuilder.LoadTableComposites = true; + dataSourceBuilder.ConfigureTypeLoading(b => b.EnableTableCompositesLoading(enabled)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -254,19 +508,17 @@ public async Task Table_as_composite([Values] bool enabled) await DoAssertion(); else { - Assert.ThrowsAsync(DoAssertion); - Assert.Null(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table))); - Assert.Null(connection.Connector!.DatabaseInfo.ArrayTypes.SingleOrDefault(c => c.Name.Contains(table))); + Assert.ThrowsAsync(DoAssertion); + Assert.That(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table)), Is.Null); + 
Assert.That(connection.Connector!.DatabaseInfo.ArrayTypes.SingleOrDefault(c => c.Name.Contains(table)), Is.Null); } Task DoAssertion() => AssertType( - connection, - new SomeComposite { SomeText = "foo", X = 8 }, - "(8,foo)", - table, - npgsqlDbType: null); + dataSource, + new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", + table, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1267")] @@ -277,17 +529,14 @@ public async Task Table_as_composite_with_deleted_columns() await adminConnection.ExecuteNonQueryAsync($"ALTER TABLE {table} DROP COLUMN bar;"); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.LoadTableComposites = true; + dataSourceBuilder.ConfigureTypeLoading(b => b.EnableTableCompositesLoading()); dataSourceBuilder.MapComposite(table); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( - connection, - new SomeComposite { SomeText = "foo", X = 8 }, - "(8,foo)", - table, - npgsqlDbType: null); + dataSource, + new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", + table, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1125")] @@ -301,21 +550,16 @@ public async Task Nullable_property_in_class_composite() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( - connection, - new ClassWithNullableProperty { Foo = 8 }, - "(8)", - type, - npgsqlDbType: null); + dataSource, + new ClassWithNullableProperty { Foo = 8 }, "(8)", + type, dataTypeInference: DataTypeInference.Nothing); await AssertType( - connection, - new ClassWithNullableProperty { Foo = null }, - "()", - type, - npgsqlDbType: null); + dataSource, + new 
ClassWithNullableProperty { Foo = null }, "()", + type, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1125")] @@ -329,27 +573,24 @@ public async Task Nullable_property_in_struct_composite() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( - connection, - new StructWithNullableProperty { Foo = 8 }, - "(8)", - type, - npgsqlDbType: null); + dataSource, + new StructWithNullableProperty { Foo = 8 }, "(8)", + type, dataTypeInference: DataTypeInference.Nothing); await AssertType( - connection, - new StructWithNullableProperty { Foo = null }, - "()", - type, - npgsqlDbType: null); + dataSource, + new StructWithNullableProperty { Foo = null }, "()", + type, dataTypeInference: DataTypeInference.Nothing); } [Test] public async Task PostgresType() { - await using var connection = await OpenConnectionAsync(); + // Set max pool size to 1 to ensure we execute queries on the connection which has the new types + await using var dataSource = CreateDataSource(connectionStringBuilderAction: csb => csb.MaxPoolSize = 1); + await using var connection = await dataSource.OpenConnectionAsync(); var type1 = await GetTempTypeName(connection); var type2 = await GetTempTypeName(connection); @@ -380,8 +621,151 @@ await connection.ExecuteNonQueryAsync(@$" Assert.That(elemType, Is.SameAs(comp1Type)); } + [Test] + public async Task DuplicateConstructorParameters() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (long int8, boolean bool)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + + var ex = 
Assert.ThrowsAsync(async () => await AssertType( + dataSource, + new DuplicateOneLongOneBool(true, 1), + "(1,t)", + type, dataTypeInference: DataTypeInference.Nothing)); + Assert.That(ex!.InnerException, Is.TypeOf()); + } + + [Test] + public async Task PartialConstructorMissingSetter() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (long int8, boolean bool)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + + var ex = Assert.ThrowsAsync(async () => await AssertTypeRead( + dataSource, + "(1,t)", + type, + new MissingSetterOneLongOneBool(true, 1))); + Assert.That(ex, Is.TypeOf().With.Message.Contains("No (public) setter for")); + } + + [Test] + public async Task PartialConstructorWorks() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (long int8, boolean bool)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new OneLongOneBool(1) { BooleanValue = true }, "(1,t)", + type, dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task CompositeOverRange() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int, some_text text); CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + dataSourceBuilder.EnableUnmappedTypes(); + 
await using var dataSource = dataSourceBuilder.Build(); + + var composite1 = new SomeComposite + { + SomeText = "foo", + X = 8 + }; + + var composite2 = new SomeComposite + { + SomeText = "bar", + X = 42 + }; + + await AssertType( + dataSource, + new NpgsqlRange(composite1, composite2), + "[\"(8,foo)\",\"(42,bar)\"]", + rangeType, dataTypeInference: DataTypeInference.Nothing); + } + #region Test Types +#pragma warning disable CS9113 + readonly struct DuplicateOneLongOneBool(bool boolean, [PgName("boolean")] int @bool) + { + [PgName("long")] + public long LongValue { get; } + + [PgName("boolean")] + public bool BooleanValue { get; } + } +#pragma warning restore CS9113 + + readonly struct MissingSetterOneLongOneBool + { + public MissingSetterOneLongOneBool(long @long) + => LongValue = @long; + + public MissingSetterOneLongOneBool(bool boolean, [PgName("boolean")]int @bool) + { + } + + [PgName("long")] + public long LongValue { get; } + + [PgName("boolean")] + public bool BooleanValue { get; } + } + + struct OneLongOneBool + { + public OneLongOneBool(bool boolean, [PgName("boolean")]int @bool) + { + } + + public OneLongOneBool(long @long) + => LongValue = @long; + + public OneLongOneBool(double other) + { + } + + public OneLongOneBool(int boolean, [PgName("boolean")]bool @bool) + { + } + + [PgName("long")] + public long LongValue { get; } + + [PgName("boolean")] + public bool BooleanValue { get; set; } + } + + record SomeComposite { public int X { get; set; } @@ -400,6 +784,39 @@ struct SomeCompositeStruct public string SomeText { get; set; } } + class SomeCompositeWithArray + { + public int[]? Ints { get; set; } + } + + class SomeCompositeWithEnum + { + public enum TestEnum + { + Value1, + Value2, + Value3 + } + + public TestEnum EnumValue { get; set; } + } + + class SomeCompositeWithIPAddress + { + public IPAddress? Address { get; set; } + } + + class SomeCompositeWithTypeInfoProviderType + { + public DateTime[]? 
DateTimes { get; set; } + } + + class SomeCompositeWithFixedSizeTypeInfoProviderField + { + public int Id { get; set; } + public DateTime CreatedAt { get; set; } + } + record NameTranslationComposite { public int Simple { get; set; } @@ -424,7 +841,5 @@ struct StructWithNullableProperty public int? Foo { get; set; } } - public CompositeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} - #endregion } diff --git a/test/Npgsql.Tests/Types/CubeTests.cs b/test/Npgsql.Tests/Types/CubeTests.cs new file mode 100644 index 0000000000..9c98438ab7 --- /dev/null +++ b/test/Npgsql.Tests/Types/CubeTests.cs @@ -0,0 +1,267 @@ +using System; +using System.Threading.Tasks; +using Npgsql.Properties; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests.Types; + +public class CubeTests : TestBase +{ + static readonly TestCaseData[] CubeValues = + { + new TestCaseData(new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }, new[] { 4.0, 5.0, 6.0 }), "(1, 2, 3),(4, 5, 6)") + .SetName("Cube_MultiDimensional"), + new TestCaseData(new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }), "(1, 2, 3)") + .SetName("Cube_MultiDimensionalPoint"), + new TestCaseData(new NpgsqlCube(1.0), "(1)") + .SetName("Cube_SingleDimensionalPoint"), + new TestCaseData(new NpgsqlCube(1.0, 2.0), "(1),(2)") + .SetName("Cube_SingleDimensional") + }; + + [Test, TestCaseSource(nameof(CubeValues))] + public Task Cube(NpgsqlCube cube, string sqlLiteral) + => AssertType(cube, sqlLiteral, "cube", dataTypeInference: DataTypeInference.Nothing); + + [Test] + public void Cube_Constructor_SingleValue() + { + var cube = new NpgsqlCube(1.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(1)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0 })); + } + + [Test] + public void Cube_Constructor_SingleCoord_Point() + { + var cube = new NpgsqlCube(1.0, 1.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, 
Is.EqualTo(1)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0 })); + } + + [Test] + public void Cube_Constructor_SingleCoord_NotPoint() + { + var cube = new NpgsqlCube(1.0, 2.0); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(1)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 2.0 })); + } + + [Test] + public void Cube_Constructor_LowerLeft_UpperRight_NotPoint() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(2)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 3.0, 4.0 })); + } + + [Test] + public void Cube_Constructor_LowerLeft_UpperRight_Point() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 1.0, 2.0 }); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(2)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0, 2.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_Single_Point() + { + var existingCube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }); + var cube = new NpgsqlCube(existingCube, 4.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(4)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_Single_NotPoint() + { + var existingCube = new NpgsqlCube(new [] { 1.0, 2.0 }, new [] { 3.0, 4.0 }); + var cube = new NpgsqlCube(existingCube, 3.0); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(3)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] 
{ 1.0, 2.0, 3.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 3.0, 4.0, 3.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_LowerLeft_UpperRight_Point() + { + var existingCube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }); + var cube = new NpgsqlCube(existingCube, 4.0, 4.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(4)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_LowerLeft_UpperRight_NotPoint() + { + var existingCube = new NpgsqlCube(new [] { 1.0, 2.0 }, new [] { 3.0, 4.0 }); + var cube = new NpgsqlCube(existingCube, 4.0, 5.0); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(3)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 4.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 3.0, 4.0, 5.0 })); + } + + [Test] + public void Cube_Subset() + { + var cube = new NpgsqlCube(new [] { 1.0, 2.0, 3.0 }, new [] { 4.0, 5.0, 6.0 }); + Assert.That(cube.ToSubset(0, 2, 1, 1), Is.EqualTo(new NpgsqlCube(new [] { 1.0, 3.0, 2.0, 2.0 }, new [] { 4.0, 6.0, 5.0, 5.0 }))); + } + + [Test] + public void Cube_ToString_NotPoint() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }, new[] { 4.0, 5.0, 6.0 }); + Assert.That(cube.ToString(), Is.EqualTo("(1, 2, 3),(4, 5, 6)")); + } + + [Test] + public void Cube_ToString_Point() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }); + Assert.That(cube.ToString(), Is.EqualTo("(1, 2, 3)")); + } + + [Test] + public async Task Cube_Array() + { + var data = new[] + { + new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }), + new NpgsqlCube(new[] { 5.0, 6.0 }), + new NpgsqlCube(1.0, 2.0) + }; + + await AssertType( + data, + @"{""(1, 2),(3, 4)"",""(5, 6)"",""(1),(2)""}", + "cube[]", + dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + 
public void Cube_DimensionMismatch_ThrowsArgumentException() + { + var ex = Assert.Throws(() => new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0 })); + Assert.That(ex!.Message, Does.Contain("Different point dimensions")); + } + + [Test] + public Task Cube_NegativeValues() + => AssertType( + new NpgsqlCube(new[] { -1.0, -2.0, -3.0 }, new[] { -4.0, -5.0, -6.0 }), + "(-1, -2, -3),(-4, -5, -6)", + "cube", + dataTypeInference: DataTypeInference.Nothing); + + [Test] + public void Cube_Equality_HashCode() + { + var cube1 = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }); + var cube2 = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }); + var cube3 = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 5.0 }); + + // Test equality + Assert.That(cube1, Is.EqualTo(cube2)); + Assert.That(cube1 == cube2, Is.True); + Assert.That(cube1 != cube3, Is.True); + Assert.That(cube1.Equals(cube2), Is.True); + Assert.That(cube1.Equals(cube3), Is.False); + + // Test hash code consistency + Assert.That(cube1.GetHashCode(), Is.EqualTo(cube2.GetHashCode())); + Assert.That(cube1.GetHashCode(), Is.Not.EqualTo(cube3.GetHashCode())); + } + + [Test] + public Task Cube_ZeroValues() + => AssertType( + new NpgsqlCube(0.0, 0.0), + "(0)", + "cube", + dataTypeInference: DataTypeInference.Nothing); + + [Test] + public Task Cube_MaxDimensions() + { + var lowerLeft = new double[100]; + var upperRight = new double[100]; + for (var i = 0; i < 100; i++) + { + lowerLeft[i] = i; + upperRight[i] = i + 100; + } + + var expectedLower = string.Join(", ", lowerLeft); + var expectedUpper = string.Join(", ", upperRight); + var expected = $"({expectedLower}),({expectedUpper})"; + + return AssertType( + new NpgsqlCube(lowerLeft, upperRight), + expected, + "cube", + dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task Cube_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var errorMessage = string.Format( + NpgsqlStrings.CubeNotEnabled, 
nameof(NpgsqlSlimDataSourceBuilder.EnableCube), nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = + await AssertTypeUnsupportedRead("(1),(2)", "cube", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedWrite(new NpgsqlCube(1.0, 2.0), "cube", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableCube() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableCube(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableArrays() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableCube(); + dataSourceBuilder.EnableArrays(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", dataTypeInference: DataTypeInference.Nothing); + } + + [OneTimeSetUp] + public async Task SetUp() + { + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "13.0"); + await TestUtil.EnsureExtensionAsync(conn, "cube"); + } +} diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 3bcf87378f..834ad346e9 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -1,198 +1,108 @@ using System; +using System.Data; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Util.Statics; namespace 
Npgsql.Tests.Types; -[TestFixture(true)] -#if DEBUG [TestFixture(false)] -#endif +#if DEBUG +[TestFixture(true)] [NonParallelizable] -public class DateTimeInfinityTests : TestBase, IDisposable -{ - [Test] - public async Task TimestampTz_write() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT ($1 AT TIME ZONE 'UTC')::text", conn) - { - Parameters = - { - new() { Value = DateTime.MinValue, NpgsqlDbType = NpgsqlDbType.TimestampTz }, - } - }; - - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01 00:00:00" : "-infinity")); - - cmd.Parameters[0].Value = DateTime.MaxValue; - - if (DisableDateTimeInfinityConversions) - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); - else - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("infinity")); - } - - [Test] - public async Task TimestampTz_read() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand( - "SELECT '-infinity'::timestamp with time zone, 'infinity'::timestamp with time zone", - conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader[0], Is.EqualTo(DateTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(DateTime.MaxValue)); - } - } - - [Test] - public async Task Timestamp_write() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT $1::text, $2::text", conn) - { - Parameters = - { - new() { Value = DateTime.MinValue, NpgsqlDbType = NpgsqlDbType.Timestamp }, - new() { Value = DateTime.MaxValue, NpgsqlDbType = NpgsqlDbType.Timestamp }, - } - }; - await using (var reader = await cmd.ExecuteReaderAsync()) - { - await 
reader.ReadAsync(); - - Assert.That(reader[0], Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01 00:00:00" : "-infinity")); - Assert.That(reader[1], Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31 23:59:59.999999" : "infinity")); - } - } - - [Test] - public async Task Timestamp_read() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand( - "SELECT '-infinity'::timestamp without time zone, 'infinity'::timestamp without time zone", - conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader[0], Is.EqualTo(DateTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(DateTime.MaxValue)); - } - } - - [Test, NonParallelizable] - public async Task Date_DateTime_write() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT $1::text, $2::text", conn) - { - Parameters = - { - new() { Value = DateTime.MinValue, NpgsqlDbType = NpgsqlDbType.Date }, - new() { Value = DateTime.MaxValue, NpgsqlDbType = NpgsqlDbType.Date } - } - }; - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader[0], Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01" : "-infinity")); - Assert.That(reader[1], Is.EqualTo(DisableDateTimeInfinityConversions ? 
"9999-12-31" : "infinity")); - } - - [Test] - public async Task Date_DateTime_read() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT '-infinity'::date, 'infinity'::date", conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader[0], Is.EqualTo(DateTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(DateTime.MaxValue)); - } - } - -#if NET6_0_OR_GREATER - [Test] - public async Task Date_DateOnly_write() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT $1::text, $2::text", conn) - { - Parameters = - { - new() { Value = DateOnly.MinValue, NpgsqlDbType = NpgsqlDbType.Date }, - new() { Value = DateOnly.MaxValue, NpgsqlDbType = NpgsqlDbType.Date } - } - }; - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader[0], Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01" : "-infinity")); - Assert.That(reader[1], Is.EqualTo(DisableDateTimeInfinityConversions ? 
"9999-12-31" : "infinity")); - } - - [Test] - public async Task Date_DateOnly_read() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT '-infinity'::date, 'infinity'::date", conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader.GetFieldValue(0), Is.EqualTo(DateOnly.MinValue)); - Assert.That(reader.GetFieldValue(1), Is.EqualTo(DateOnly.MaxValue)); - } - } #endif +public sealed class DateTimeInfinityTests : TestBase, IDisposable +{ + static readonly TestCaseData[] TimestampDateTimeValues = + [ + new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01 00:00:00", "0002-01-01 00:00:00") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTime.MinValue, "0001-01-01 00:00:00", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateTime.MaxValue, "9999-12-31 23:59:59.999999", "infinity") + .SetName("MaxValue") + ]; + + static readonly TestCaseData[] TimestampTzDateTimeValues = + [ + new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01 00:00:00+00", "0002-01-01 00:00:00+00") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTime.MinValue, "0001-01-01 00:00:00+00", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateTime.MaxValue, "9999-12-31 23:59:59.999999+00", "infinity") + .SetName("MaxValue") + ]; + + static readonly TestCaseData[] TimestampTzDateTimeOffsetValues = + [ + new TestCaseData(DateTimeOffset.MinValue.ToUniversalTime().AddYears(1), "0002-01-01 00:00:00+00", "0002-01-01 00:00:00+00") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTimeOffset.MinValue, "0001-01-01 00:00:00+00", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateTimeOffset.MaxValue, "9999-12-31 23:59:59.999999+00", "infinity") + .SetName("MaxValue") + 
]; + + static readonly TestCaseData[] DateDateTimeValues = + [ + new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01", "0002-01-01") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTime.MinValue, "0001-01-01", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateTime.MaxValue, "9999-12-31", "infinity") + .SetName("MaxValue") + ]; + + // As we can't roundtrip DateTime.MaxValue due to precision differences with postgres we are lenient with equality for this particular value. + static readonly Func MaxValuePrecisionLenientComparer = + (expected, actual) => expected == DateTime.MaxValue && actual == new DateTime(expected.Ticks - 9) || actual == expected; + + [Test, TestCaseSource(nameof(TimestampDateTimeValues))] + public Task Timestamp_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "timestamp without time zone", + dbType: DbType.DateTime2, + comparer: MaxValuePrecisionLenientComparer); + + [Test, TestCaseSource(nameof(TimestampTzDateTimeValues))] + public Task TimestampTz_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(new DateTime(dateTime.Ticks, DateTimeKind.Utc), DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "timestamp with time zone", + dbType: DbType.DateTime, + comparer: MaxValuePrecisionLenientComparer); + + [Test, TestCaseSource(nameof(TimestampTzDateTimeOffsetValues))] + public Task TimestampTz_DateTimeOffset(DateTimeOffset dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(dateTime, DisableDateTimeInfinityConversions ? 
sqlLiteral : infinityConvertedSqlLiteral, + "timestamp with time zone", + dbType: DbType.DateTime, + comparer: (expected, actual) => MaxValuePrecisionLenientComparer(expected.DateTime, actual.DateTime), + valueTypeEqualsFieldType: false); + + [Test, TestCaseSource(nameof(DateDateTimeValues))] + public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(DisableDateTimeInfinityConversions ? dateTime.Date : dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2), valueTypeEqualsFieldType: false); + + static readonly TestCaseData[] DateOnlyDateTimeValues = + [ + new TestCaseData(DateOnly.MinValue.AddYears(1), "0002-01-01", "0002-01-01") + .SetName("MinValue_AddYear"), + new TestCaseData(DateOnly.MinValue, "0001-01-01", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateOnly.MaxValue, "9999-12-31", "infinity") + .SetName("MaxValue") + ]; + + [Test, TestCaseSource(nameof(DateOnlyDateTimeValues))] + public Task Date_DateOnly(DateOnly dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "date", + dbType: DbType.Date); + + NpgsqlDataSource? 
_dataSource; + protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.Timezone = "UTC"); public DateTimeInfinityTests(bool disableDateTimeInfinityConversions) { @@ -212,5 +122,6 @@ public void Dispose() #if DEBUG DisableDateTimeInfinityConversions = false; #endif + DataSource.Dispose(); } } diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index e91cf5c7f2..d1e5feb0df 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -1,4 +1,5 @@ -using System; +using System; +using System.Collections.Generic; using System.Data; using System.Threading.Tasks; using NpgsqlTypes; @@ -7,31 +8,63 @@ namespace Npgsql.Tests.Types; -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing public class DateTimeTests : TestBase { #region Date + [Test] + public Task Date_as_DateOnly() + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", dbType: DbType.Date); + [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForWriting: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2), valueTypeEqualsFieldType: false); + + [Test] + public Task Date_as_DateTime_with_date_and_time_before_2000() + => AssertTypeWrite(new DateTime(1980, 10, 1, 11, 0, 0), "1980-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2)); // Internal PostgreSQL representation (days since 2020-01-01), for out-of-range values. 
[Test] public Task Date_as_int() - => AssertType(7579, "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(7579, "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.Int32), valueTypeEqualsFieldType: false); + + [Test] + public Task Daterange_as_NpgsqlRange_of_DateOnly() + => AssertType( + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + "[2002-03-04,2002-03-06)", + "daterange", + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + [Test] + public Task Daterange_array_as_NpgsqlRange_of_DateOnly_array() + => AssertType( + new[] + { + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 9), false) + }, + """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-09)"}""", + "daterange[]", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false); [Test] public Task Daterange_as_NpgsqlRange_of_DateTime() => AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", - "daterange", - NpgsqlDbType.DateRange, - isDefaultForWriting: false); + "daterange", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false); [Test] - public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateTime() + public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateOnly() { await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); @@ -39,128 +72,103 @@ public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateTime() await AssertType( new[] { - new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), - new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), 
false) }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - NpgsqlDbType.DateMultirange, - isDefaultForWriting: false); + "datemultirange"); } -#if NET6_0_OR_GREATER - [Test] - public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); - - [Test] - public Task Daterange_as_NpgsqlRange_of_DateOnly() - => AssertType( - new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), - "[2002-03-04,2002-03-06)", - "daterange", - NpgsqlDbType.DateRange, - isDefaultForReading: false); - [Test] - public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateOnly() + public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateTime() { await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); - await AssertType( + await AssertType( new[] { - new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), - new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - NpgsqlDbType.DateMultirange, - isDefaultForReading: false); + "datemultirange", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false); } -#endif #endregion #region Time [Test] - public Task Time_as_TimeSpan() - => AssertType( - new TimeSpan(0, 10, 45, 34, 500), - "10:45:34.5", + public Task Time_as_TimeOnly() + => AssertType(new TimeOnly(10, 45, 34, 500), "10:45:34.5", "time without time zone", - NpgsqlDbType.Time, - DbType.Time, - isDefaultForWriting: false); + dbType: DbType.Time); -#if NET6_0_OR_GREATER [Test] - public Task Time_as_TimeOnly() - => AssertType( - new TimeOnly(10, 45, 34, 500), - "10:45:34.5", - "time without time zone", - NpgsqlDbType.Time, - 
DbType.Time, - isDefaultForReading: false); -#endif + public Task Time_as_TimeSpan() + => AssertType(new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", + "time without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Time, DbType.Object), valueTypeEqualsFieldType: false); #endregion #region Time with timezone - [Test] - public async Task TimeTz_as_DateTimeOffset() - { - await AssertTypeRead("13:03:45.51+02", - "time with time zone", new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2))); - - await AssertTypeWrite( - new DateTimeOffset(1, 1, 1, 13, 3, 45, 510, TimeSpan.FromHours(2)), - "13:03:45.51+02", - "time with time zone", - NpgsqlDbType.TimeTz, - isDefault: false); - } - - [Test] - public Task TimeTz_before_utc_zero() - => AssertTypeRead("01:00:00+02", - "time with time zone", new DateTimeOffset(1, 1, 2, 1, 0, 0, new TimeSpan(0, 2, 0, 0))); + static readonly TestCaseData[] TimeTzValues = + [ + new TestCaseData(new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), "13:03:45.51+02") + .SetName("Timezone"), + new TestCaseData(new DateTimeOffset(1, 1, 2, 1, 0, 45, 510, TimeSpan.FromHours(-3)), "01:00:45.51-03") + .SetName("Negative_timezone"), + new TestCaseData(new DateTimeOffset(1212720130000, TimeSpan.Zero), "09:41:12.013+00") + .SetName("Utc"), + new TestCaseData(new DateTimeOffset(1, 1, 2, 1, 0, 0, new TimeSpan(0, 2, 0, 0)), "01:00:00+02") + .SetName("Before_utc_zero") + ]; + + [Test, TestCaseSource(nameof(TimeTzValues))] + public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) + => AssertType(time, sqlLiteral, + "time with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.DateTime)); #endregion #region Timestamp static readonly TestCaseData[] TimestampValues = - { + [ new TestCaseData(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "1998-04-12 13:26:38") .SetName("Timestamp_pre2000"), new TestCaseData(new DateTime(2015, 
1, 27, 8, 45, 12, 345, DateTimeKind.Unspecified), "2015-01-27 08:45:12.345") .SetName("Timestamp_post2000"), new TestCaseData(new DateTime(2013, 7, 25, 0, 0, 0, DateTimeKind.Unspecified), "2013-07-25 00:00:00") .SetName("Timestamp_date_only") - }; + ]; [Test, TestCaseSource(nameof(TimestampValues))] - public Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) - => AssertType(dateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2); + public async Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) + { + await AssertType(dateTime, sqlLiteral, "timestamp without time zone", dbType: DbType.DateTime2, + // Explicitly check kind as well. + comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); + + await AssertType( + new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp without time zone[]", + valueTypeEqualsFieldType: false); + } [Test] public Task Timestamp_cannot_write_utc_DateTime() - => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); + => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); [Test] public Task Timestamp_as_long() - => AssertType( - -54297202000000, - "1998-04-12 13:26:38", - "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime2, - isDefault: false); + => AssertType(-54297202000000, "1998-04-12 13:26:38", + "timestamp without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime2, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public Task Timestamp_cannot_use_as_DateTimeOffset() @@ -177,7 +185,23 @@ public Task Tsrange_as_NpgsqlRange_of_DateTime() new(1998, 4, 12, 15, 26, 38, DateTimeKind.Local)), @"[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""]", "tsrange", - NpgsqlDbType.TimestampRange); + skipArrayCheck: true); // 
NpgsqlRange[] is mapped to multirange by default, not array; test separately + + [Test] + public Task Tsrange_array_as_NpgsqlRange_of_DateTime_array() + => AssertType( + new[] + { + new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), + new(1998, 4, 12, 15, 26, 38, DateTimeKind.Local)), + new NpgsqlRange( + new(1998, 4, 13, 13, 26, 38, DateTimeKind.Local), + new(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)), + }, + """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]","[\"1998-04-13 13:26:38\",\"1998-04-13 15:26:38\"]"}""", + "tsrange[]", + dataTypeInference: DataTypeInference.Mismatch); [Test] public async Task Tsmultirange_as_array_of_NpgsqlRange_of_DateTime() @@ -196,8 +220,7 @@ await AssertType( new(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)), }, @"{[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""],[""1998-04-13 13:26:38"",""1998-04-13 15:26:38""]}", - "tsmultirange", - NpgsqlDbType.TimestampMultirange); + "tsmultirange"); } #endregion @@ -207,33 +230,44 @@ await AssertType( // Note that the below text representations are local (according to TimeZone, which is set to Europe/Berlin in this test class), // because that's how PG does timestamptz *text* representation. 
static readonly TestCaseData[] TimestampTzWriteValues = - { + [ new TestCaseData(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "1998-04-12 15:26:38+02") .SetName("Timestamptz_write_pre2000"), new TestCaseData(new DateTime(2015, 1, 27, 8, 45, 12, 345, DateTimeKind.Utc), "2015-01-27 09:45:12.345+01") .SetName("Timestamptz_write_post2000"), new TestCaseData(new DateTime(2013, 7, 25, 0, 0, 0, DateTimeKind.Utc), "2013-07-25 02:00:00+02") .SetName("Timestamptz_write_date_only") - }; + ]; [Test, TestCaseSource(nameof(TimestampTzWriteValues))] - public Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) - => AssertType(dateTime, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime); + public async Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) + { + await AssertType(dateTime, sqlLiteral, "timestamp with time zone", dbType: DbType.DateTime, + // Explicitly check kind as well. + comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); + + await AssertType( + new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp with time zone[]", + valueTypeEqualsFieldType: false); + + } [Test] public async Task Timestamptz_infinity_as_DateTime() { - await AssertType(DateTime.MinValue, "-infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, - isDefault: false); - await AssertType(DateTime.MaxValue, "infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, - isDefault: false); + await AssertType(DateTime.MinValue, "-infinity", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.DateTime2)); + await AssertType(DateTime.MaxValue, "infinity", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.DateTime2)); } [Test] public async Task Timestamptz_cannot_write_non_utc_DateTime() { - 
await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); } [Test] @@ -243,9 +277,8 @@ public async Task Timestamptz_as_DateTimeOffset_utc() new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefaultForReading: false); + dbType: DbType.DateTime, + valueTypeEqualsFieldType: false); Assert.That(dateTimeOffset.Offset, Is.EqualTo(TimeSpan.Zero)); } @@ -255,25 +288,35 @@ public Task Timestamptz_as_DateTimeOffset_utc_with_DbType_DateTimeOffset() => AssertTypeWrite( new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTimeOffset, - inferredDbType: DbType.DateTime, - isDefault: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.DateTime, DbType.DateTimeOffset)); [Test] public Task Timestamptz_cannot_write_non_utc_DateTimeOffset() - => AssertTypeUnsupportedWrite(new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.FromHours(2))); + => AssertTypeUnsupportedWrite(new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.FromHours(2))); [Test] public Task Timestamptz_as_long() - => AssertType( - -54297202000000, - "1998-04-12 15:26:38+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTime, - isDefault: false); + => AssertType(-54297202000000, "1998-04-12 15:26:38+02", + "timestamp with time zone", dataTypeInference: 
DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.Int64), valueTypeEqualsFieldType: false); + + [Test] + public async Task Timestamptz_array_as_DateTimeOffset_array() + { + var dateTimeOffsets = await AssertType( + new[] + { + new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), + new DateTimeOffset(1999, 4, 12, 13, 26, 38, TimeSpan.Zero) + }, + """{"1998-04-12 15:26:38+02","1999-04-12 15:26:38+02"}""", + "timestamp with time zone[]", + valueTypeEqualsFieldType: false); + + Assert.That(dateTimeOffsets[0].Offset, Is.EqualTo(TimeSpan.Zero)); + Assert.That(dateTimeOffsets[1].Offset, Is.EqualTo(TimeSpan.Zero)); + } [Test] public Task Tstzrange_as_NpgsqlRange_of_DateTime() @@ -283,7 +326,23 @@ public Task Tstzrange_as_NpgsqlRange_of_DateTime() new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", "tstzrange", - NpgsqlDbType.TimestampTzRange); + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + [Test] + public Task Tstzrange_array_as_NpgsqlRange_of_DateTime_array() + => AssertType( + new[] + { + new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc), + new(1998, 4, 13, 15, 26, 38, DateTimeKind.Utc)), + }, + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", + "tstzrange[]", + dataTypeInference: DataTypeInference.Mismatch); [Test] public async Task Tstzmultirange_as_array_of_NpgsqlRange_of_DateTime() @@ -302,22 +361,20 @@ await AssertType( new DateTime(1998, 4, 13, 15, 26, 38, DateTimeKind.Utc)), }, @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange); + "tstzmultirange"); } [Test] public Task 
Cannot_mix_DateTime_Kinds_in_array() - => AssertTypeUnsupportedWrite(new[] - { + => AssertTypeUnsupportedWrite([ new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), - new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), - }); + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local) + ]); [Test] public Task Cannot_mix_DateTime_Kinds_in_range() - => AssertTypeUnsupportedWrite(new NpgsqlRange( + => AssertTypeUnsupportedWrite, ArgumentException>(new NpgsqlRange( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local))); @@ -327,15 +384,35 @@ public async Task Cannot_mix_DateTime_Kinds_in_multirange() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); - await AssertTypeUnsupportedWrite(new[] - { + await AssertTypeUnsupportedWrite[], ArgumentException>([ + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), new NpgsqlRange( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 12, 15, 26, 38, 
DateTimeKind.Utc)), new NpgsqlRange( new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Local), - new DateTime(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)), - }); + new DateTime(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)) + ]); } [Test] @@ -343,8 +420,8 @@ public void NpgsqlParameterDbType_is_value_dependent_datetime_or_datetime2() { var localtimestamp = new NpgsqlParameter { Value = DateTime.Now }; var unspecifiedtimestamp = new NpgsqlParameter { Value = new DateTime() }; - Assert.AreEqual(DbType.DateTime2, localtimestamp.DbType); - Assert.AreEqual(DbType.DateTime2, unspecifiedtimestamp.DbType); + Assert.That(localtimestamp.DbType, Is.EqualTo(DbType.DateTime2)); + Assert.That(unspecifiedtimestamp.DbType, Is.EqualTo(DbType.DateTime2)); // We don't support any DateTimeOffset other than offset 0 which maps to timestamptz, // we might add an exception for offset == DateTimeOffset.Now.Offset (local offset) mapping to timestamp at some point. @@ -353,8 +430,8 @@ public void NpgsqlParameterDbType_is_value_dependent_datetime_or_datetime2() var timestamptz = new NpgsqlParameter { Value = DateTime.UtcNow }; var dtotimestamptz = new NpgsqlParameter { Value = DateTimeOffset.UtcNow }; - Assert.AreEqual(DbType.DateTime, timestamptz.DbType); - Assert.AreEqual(DbType.DateTime, dtotimestamptz.DbType); + Assert.That(timestamptz.DbType, Is.EqualTo(DbType.DateTime)); + Assert.That(dtotimestamptz.DbType, Is.EqualTo(DbType.DateTime)); } [Test] @@ -362,13 +439,51 @@ public void NpgsqlParameterNpgsqlDbType_is_value_dependent_timestamp_or_timestam { var localtimestamp = new NpgsqlParameter { Value = DateTime.Now }; var unspecifiedtimestamp = new NpgsqlParameter { Value = new DateTime() }; - Assert.AreEqual(NpgsqlDbType.Timestamp, localtimestamp.NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Timestamp, unspecifiedtimestamp.NpgsqlDbType); + Assert.That(localtimestamp.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp)); + Assert.That(unspecifiedtimestamp.NpgsqlDbType, 
Is.EqualTo(NpgsqlDbType.Timestamp)); var timestamptz = new NpgsqlParameter { Value = DateTime.UtcNow }; var dtotimestamptz = new NpgsqlParameter { Value = DateTimeOffset.UtcNow }; - Assert.AreEqual(NpgsqlDbType.TimestampTz, timestamptz.NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.TimestampTz, dtotimestamptz.NpgsqlDbType); + Assert.That(timestamptz.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz)); + Assert.That(dtotimestamptz.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz)); + } + + [Test] + public async Task Array_of_nullable_timestamptz() + { + await using var datasource = CreateDataSource(csb => + { + csb.ArrayNullabilityMode = ArrayNullabilityMode.PerInstance; + csb.Timezone = "Europe/Berlin"; + }); + await AssertType(datasource, + new DateTime?[] + { + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + null + }, + @"{""1998-04-12 15:26:38+02"",NULL}", + "timestamp with time zone[]"); + + // Make sure delayed converter resolution works when null precedes a non-null value. + // We expect the resolution of null values to not lock in the default type timestamp. + // This would cause the subsequent non-null value to fail to convert, as it requires timestamptz. 
+ await AssertType(datasource, + new DateTime?[] + { + null, + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) + }, + @"{NULL,""1998-04-12 15:26:38+02""}", + "timestamp with time zone[]"); + + await AssertType(datasource, + new DateTime?[] + { + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + }, + @"{""1998-04-12 15:26:38+02""}", + "timestamp with time zone[]", valueTypeEqualsFieldType: false); // we write DateTime?[], but will read DateTime[] from GetValue } #endregion @@ -376,7 +491,7 @@ public void NpgsqlParameterNpgsqlDbType_is_value_dependent_timestamp_or_timestam #region Interval static readonly TestCaseData[] IntervalValues = - { + [ new TestCaseData(new TimeSpan(0, 2, 3, 4, 5), "02:03:04.005") .SetName("Interval_time_only"), new TestCaseData(new TimeSpan(1, 2, 3, 4, 5), "1 day 02:03:04.005") @@ -385,43 +500,38 @@ public void NpgsqlParameterNpgsqlDbType_is_value_dependent_timestamp_or_timestam .SetName("Interval_with_many_days"), new TestCaseData(new TimeSpan(new TimeSpan(2, 3, 4).Ticks + 10), "02:03:04.000001") .SetName("Interval_with_microsecond") - }; + ]; [Test, TestCaseSource(nameof(IntervalValues))] public Task Interval_as_TimeSpan(TimeSpan timeSpan, string sqlLiteral) - => AssertType(timeSpan, sqlLiteral, "interval", NpgsqlDbType.Interval, isDefaultForWriting: false); + => AssertType(timeSpan, sqlLiteral, "interval"); [Test] public Task Interval_write_as_TimeSpan_truncates_ticks() => AssertTypeWrite( new TimeSpan(new TimeSpan(2, 3, 4).Ticks + 1), "02:03:04", - "interval", - NpgsqlDbType.Interval, - isDefault: false); + "interval"); [Test] public Task Interval_as_NpgsqlInterval() => AssertType( new NpgsqlInterval(2, 15, 7384005000), - "2 mons 15 days 02:03:04.005", "interval", - NpgsqlDbType.Interval, - isDefaultForReading: false, - isDefaultForWriting: false); + "2 mons 15 days 02:03:04.005", "interval", valueTypeEqualsFieldType: false); [Test] public Task Interval_with_months_cannot_read_as_TimeSpan() - => 
AssertTypeUnsupportedRead("1 month 2 days", "interval"); + => AssertTypeUnsupportedRead("1 month 2 days", "interval"); #endregion - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override async ValueTask OpenConnectionAsync() { - var conn = await base.OpenConnectionAsync(connectionString); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } - protected override NpgsqlConnection OpenConnection(string? connectionString = null) + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); } diff --git a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index fe415116c1..0905b7b4d6 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -1,25 +1,18 @@ using System; using System.Threading.Tasks; +using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; -public class DomainTests : MultiplexingTestBase +public class DomainTests : TestBase { [Test, Description("Resolves a domain type handler via the different pathways")] public async Task Domain_resolution() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Domain_resolution), // Prevent backend type caching in TypeHandlerRegistry - Pooling = false - }; - - using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); var type = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE DOMAIN {type} AS text"); @@ -80,5 +73,25 @@ class SomeComposite public string? 
Value { get; set; } } - public DomainTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} + [Test] + public async Task Domain_over_range() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE DOMAIN {type} AS integer; CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new NpgsqlRange(1, 2), + "[1,2]", + rangeType, + dataTypeInference: DataTypeInference.Mismatch); + } } diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 1f450c0a35..3da3b522da 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -1,14 +1,17 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; +using System.Data; using System.Threading.Tasks; using Npgsql.NameTranslation; using Npgsql.PostgresTypes; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; -public class EnumTests : MultiplexingTestBase +public class EnumTests : TestBase { enum Mood { Sad, Ok, Happy } enum AnotherEnum { Value1, Value2 } @@ -24,7 +27,54 @@ public async Task Data_source_mapping() dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task Data_source_unmap() + { + await using var adminConnection = await OpenConnectionAsync(); + 
var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(type); + + var isUnmapSuccessful = dataSourceBuilder.UnmapEnum(type); + await using var dataSource = dataSourceBuilder.Build(); + + Assert.That(isUnmapSuccessful); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); + } + + [Test] + public async Task Data_source_mapping_non_generic() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(typeof(Mood), type); + await using var dataSource = dataSourceBuilder.Build(); + await AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + } + + [Test] + public async Task Data_source_unmap_non_generic() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(typeof(Mood), type); + + var isUnmapSuccessful = dataSourceBuilder.UnmapEnum(typeof(Mood), type); + await using var dataSource = dataSourceBuilder.Build(); + + Assert.That(isUnmapSuccessful); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); } [Test] @@ -42,7 +92,7 @@ await adminConnection.ExecuteNonQueryAsync($@" dataSourceBuilder.MapEnum(type2); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { Mood.Ok, Mood.Sad }, 
"{ok,sad}", type1 + "[]", npgsqlDbType: null); + await AssertType(dataSource, new[] { Mood.Ok, Mood.Sad }, "{ok,sad}", type1 + "[]", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -56,7 +106,7 @@ public async Task Array() dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { Mood.Ok, Mood.Happy }, "{ok,happy}", type + "[]", npgsqlDbType: null); + await AssertType(dataSource, new[] { Mood.Ok, Mood.Happy }, "{ok,happy}", type + "[]", dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -70,9 +120,9 @@ public async Task Name_translation_default_snake_case() dataSourceBuilder.MapEnum(enumName1); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, NameTranslationEnum.Simple, "simple", enumName1, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.TwoWords, "two_words", enumName1, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", enumName1, npgsqlDbType: null); + await AssertType(dataSource, NameTranslationEnum.Simple, "simple", enumName1, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.TwoWords, "two_words", enumName1, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", enumName1, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -86,15 +136,16 @@ public async Task Name_translation_null() dataSourceBuilder.MapEnum(type, nameTranslator: new NpgsqlNullNameTranslator()); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, NameTranslationEnum.Simple, "Simple", type, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.TwoWords, "TwoWords", type, npgsqlDbType: null); - 
await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", type, npgsqlDbType: null); + await AssertType(dataSource, NameTranslationEnum.Simple, "Simple", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.TwoWords, "TwoWords", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", type, dataTypeInference: DataTypeInference.Nothing); } [Test] public async Task Unmapped_enum_as_clr_enum() { - await using var connection = await OpenConnectionAsync(); + await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes()); + await using var connection = await dataSource.OpenConnectionAsync(); var type1 = await GetTempTypeName(connection); var type2 = await GetTempTypeName(connection); await connection.ExecuteNonQueryAsync(@$" @@ -102,8 +153,30 @@ await connection.ExecuteNonQueryAsync(@$" CREATE TYPE {type2} AS ENUM ('value1', 'value2');"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type1, npgsqlDbType: null, isDefault: false); - await AssertType(connection, AnotherEnum.Value2, "value2", type2, npgsqlDbType: null, isDefault: false); + await AssertType(connection, Mood.Happy, "happy", type1, dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); + await AssertType(connection, AnotherEnum.Value2, "value2", type2, dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); + } + + [Test] + public async Task Unmapped_enum_as_clr_enum_supported_only_with_EnableUnmappedTypes() + { + await using var connection = await DataSource.OpenConnectionAsync(); + var enumType = await GetTempTypeName(connection); + await connection.ExecuteNonQueryAsync($"CREATE TYPE {enumType} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.UnmappedEnumsNotEnabled, + 
nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite(Mood.Happy, enumType); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead("happy", enumType); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } [Test] @@ -114,7 +187,9 @@ public async Task Unmapped_enum_as_string() await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); - await AssertType(connection, "happy", "happy", type, npgsqlDbType: null, isDefaultForWriting: false); + await AssertType(connection, "happy", "happy", type, + dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); } enum NameTranslationEnum @@ -140,8 +215,8 @@ await adminConnection.ExecuteNonQueryAsync($@" dataSourceBuilder.MapEnum($"{schema2}.my_enum"); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Enum1.One, "one", $"{schema1}.my_enum", npgsqlDbType: null); - await AssertType(dataSource, Enum2.Alpha, "alpha", $"{schema2}.my_enum", npgsqlDbType: null); + await AssertType(dataSource, Enum1.One, "one", $"{schema1}.my_enum", dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, Enum2.Alpha, "alpha", $"{schema2}.my_enum", dataTypeInference: DataTypeInference.Nothing); } enum Enum1 { One } @@ -150,8 +225,8 @@ enum Enum2 { Alpha } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1779")] public async Task GetPostgresType() { - using var _ = CreateTempPool(ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + using var conn = await 
dataSource.OpenConnectionAsync(); var type = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); conn.ReloadTypes(); @@ -171,6 +246,4 @@ enum TestEnum [PgName("label3")] Label3 } - - public EnumTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index f039c5b587..10a3d320a2 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -1,50 +1,46 @@ -using System.Collections; +using System; +using System.Collections; using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; +#pragma warning disable CS0618 // NpgsqlTsVector.Parse is obsolete + namespace Npgsql.Tests.Types; -public class FullTextSearchTests : MultiplexingTestBase +public class FullTextSearchTests : TestBase { - public FullTextSearchTests(MultiplexingMode multiplexingMode) - : base(multiplexingMode) { } - [Test] public Task TsVector() => AssertType( NpgsqlTsVector.Parse("'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'"), "'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'", - "tsvector", - NpgsqlDbType.TsVector); + "tsvector"); public static IEnumerable TsQueryTestCases() => new[] { - new object[] - { + [ "'a'", new NpgsqlTsQueryLexeme("a") - }, - new object[] - { + ], + [ "!'a'", new NpgsqlTsQueryNot( new NpgsqlTsQueryLexeme("a")) - }, - new object[] - { + ], + [ "'a' | 'b'", new NpgsqlTsQueryOr( new NpgsqlTsQueryLexeme("a"), new NpgsqlTsQueryLexeme("b")) - }, - new object[] - { + ], + [ "'a' & 'b'", new NpgsqlTsQueryAnd( new NpgsqlTsQueryLexeme("a"), new NpgsqlTsQueryLexeme("b")) - }, + ], new object[] { "'a' <-> 'b'", @@ -56,5 +52,44 @@ public static IEnumerable TsQueryTestCases() => new[] [Test] [TestCaseSource(nameof(TsQueryTestCases))] public Task TsQuery(string sqlLiteral, NpgsqlTsQuery query) - => AssertType(query, 
sqlLiteral, "tsquery", NpgsqlDbType.TsQuery); -} \ No newline at end of file + => AssertType(query, sqlLiteral, "tsquery"); + + [Test] + public async Task Full_text_search_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var errorMessage = string.Format( + NpgsqlStrings.FullTextSearchNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), + nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), dataTypeName: null, dataSource); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), dataTypeName: null, dataSource); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableFullTextSearch() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableFullTextSearch(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(new NpgsqlTsQueryLexeme("a"), "'a'", "tsquery"); + await AssertType(NpgsqlTsVector.Parse("'1'"), "'1'", "tsvector"); + } +} diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index 
b5948b0e66..f3df5891c1 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -1,4 +1,4 @@ -using System.Threading.Tasks; +using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -10,55 +10,115 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -class GeometricTypeTests : MultiplexingTestBase +class GeometricTypeTests : TestBase { [Test] public Task Point() - => AssertType(new NpgsqlPoint(1.2, 3.4), "(1.2,3.4)", "point", NpgsqlDbType.Point); + => AssertType(new NpgsqlPoint(1.2, 3.4), "(1.2,3.4)", "point"); [Test] public Task Line() - => AssertType(new NpgsqlLine(1, 2, 3), "{1,2,3}", "line", NpgsqlDbType.Line); + => AssertType(new NpgsqlLine(1, 2, 3), "{1,2,3}", "line"); [Test] public Task LineSegment() - => AssertType(new NpgsqlLSeg(1, 2, 3, 4), "[(1,2),(3,4)]", "lseg", NpgsqlDbType.LSeg); + => AssertType(new NpgsqlLSeg(1, 2, 3, 4), "[(1,2),(3,4)]", "lseg"); [Test] - public Task Box() - => AssertType(new NpgsqlBox(3, 4, 1, 2), "(4,3),(2,1)", "box", NpgsqlDbType.Box); + public async Task Box() + { + await AssertType( + new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), + "(4,3),(2,1)", + "box", + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + await AssertType( + new NpgsqlBox(top: -10, right: 0, bottom: -20, left: -10), + "(0,-10),(-10,-20)", + "box", + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + await AssertType( + new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), + "(4,3),(2,1)", + "box", + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + var swapped = new NpgsqlBox(top: -20, right: -10, bottom: -10, left: 0); + + await AssertType( + swapped, + "(0,-10),(-10,-20)", + "box", + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + await AssertType( + swapped with { UpperRight = new NpgsqlPoint(-20,-10) }, + 
"(-10,-10),(-20,-20)", + "box", + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + await AssertType( + swapped with { LowerLeft = new NpgsqlPoint(10, 10) }, + "(10,10),(0,-10)", + "box", + skipArrayCheck: true); // Uses semicolon instead of comma as separator + } + + [Test] + public async Task Box_array() + { + var data = new[] + { + new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), + new NpgsqlBox(top: 5, right: 6, bottom: 3, left: 4), + new NpgsqlBox(top: -10, right: 0, bottom: -20, left: -10) + }; + + await AssertType( + data, + "{(4,3),(2,1);(6,5),(4,3);(0,-10),(-10,-20)}", + "box[]"); + + var swappedData = new[] + { + new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), + new NpgsqlBox(top: 3, right: 4, bottom: 5, left: 6), + new NpgsqlBox(top: -20, right: -10, bottom: -10, left: 0) + }; + + await AssertType( + swappedData, + "{(4,3),(2,1);(6,5),(4,3);(0,-10),(-10,-20)}", + "box[]"); + } [Test] public Task Path_closed() => AssertType( - new NpgsqlPath(new[] {new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)}, false), + new NpgsqlPath([new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)], false), "((1,2),(3,4))", - "path", - NpgsqlDbType.Path); + "path"); [Test] public Task Path_open() => AssertType( - new NpgsqlPath(new[] { new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4) }, true), + new NpgsqlPath([new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)], true), "[(1,2),(3,4)]", - "path", - NpgsqlDbType.Path); + "path"); [Test] public Task Polygon() => AssertType( new NpgsqlPolygon(new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)), "((1,2),(3,4))", - "polygon", - NpgsqlDbType.Polygon); + "polygon"); [Test] public Task Circle() => AssertType( new NpgsqlCircle(1, 2, 0.5), "<(1,2),0.5>", - "circle", - NpgsqlDbType.Circle); - - public GeometricTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file + "circle"); +} diff --git a/test/Npgsql.Tests/Types/HstoreTests.cs b/test/Npgsql.Tests/Types/HstoreTests.cs index 
ab1ee2ad6c..2d42be4448 100644 --- a/test/Npgsql.Tests/Types/HstoreTests.cs +++ b/test/Npgsql.Tests/Types/HstoreTests.cs @@ -1,13 +1,11 @@ using System.Collections.Generic; using System.Collections.Immutable; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -[NonParallelizable] -public class HstoreTests : MultiplexingTestBase +public class HstoreTests : TestBase { [Test] public Task Hstore() @@ -19,12 +17,11 @@ public Task Hstore() {"cd", "hello"} }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", - "hstore", - NpgsqlDbType.Hstore); + "hstore", dataTypeInference: DataTypeInference.Nothing); [Test] public Task Hstore_empty() - => AssertType(new Dictionary(), @"", "hstore", NpgsqlDbType.Hstore); + => AssertType(new Dictionary(), @"", "hstore", dataTypeInference: DataTypeInference.Nothing); [Test] public Task Hstore_as_ImmutableDictionary() @@ -39,8 +36,7 @@ public Task Hstore_as_ImmutableDictionary() immutableDictionary, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - NpgsqlDbType.Hstore, - isDefaultForReading: false); + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } [Test] @@ -54,8 +50,7 @@ public Task Hstore_as_IDictionary() }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - NpgsqlDbType.Hstore, - isDefaultForReading: false); + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); [OneTimeSetUp] public async Task SetUp() @@ -64,6 +59,4 @@ public async Task SetUp() TestUtil.MinimumPgVersion(conn, "9.1", "Hstore introduced in PostgreSQL 9.1"); await TestUtil.EnsureExtensionAsync(conn, "hstore", "9.1"); } - - public HstoreTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index a5d69664a4..7e69a85453 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs 
@@ -1,10 +1,10 @@ -using System.Threading.Tasks; +using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -public class InternalTypeTests : MultiplexingTestBase +public class InternalTypeTests : TestBase { [Test] public async Task Read_internal_char() @@ -20,26 +20,21 @@ public async Task Read_internal_char() } [Test] - [TestCase(NpgsqlDbType.Oid)] - [TestCase(NpgsqlDbType.Regtype)] - [TestCase(NpgsqlDbType.Regconfig)] - public async Task Internal_uint_types(NpgsqlDbType npgsqlDbType) + [TestCase("oid")] + [TestCase("regtype")] + [TestCase("regconfig")] + [TestCase("regclass")] + [TestCase("regcollation")] + [TestCase("regdictionary")] + [TestCase("regnamespace")] + [TestCase("regoper")] + [TestCase("regoperator")] + [TestCase("regproc")] + [TestCase("regprocedure")] + [TestCase("regrole")] + public async Task Internal_uint_types(string postgresType) { - var postgresType = npgsqlDbType.ToString().ToLowerInvariant(); - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand($"SELECT @max, 4294967295::{postgresType}, @eight, 8::{postgresType}", conn); - cmd.Parameters.AddWithValue("max", npgsqlDbType, uint.MaxValue); - cmd.Parameters.AddWithValue("eight", npgsqlDbType, 8u); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - for (var i = 0; i < reader.FieldCount; i++) - Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(uint))); - - Assert.That(reader.GetValue(0), Is.EqualTo(uint.MaxValue)); - Assert.That(reader.GetValue(1), Is.EqualTo(uint.MaxValue)); - Assert.That(reader.GetValue(2), Is.EqualTo(8u)); - Assert.That(reader.GetValue(3), Is.EqualTo(8u)); + await AssertType(uint.MaxValue, "4294967295", postgresType, dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -52,21 +47,22 @@ public async Task Tid() cmd.Parameters.AddWithValue("p", NpgsqlDbType.Tid, expected); using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - Assert.AreEqual(1234, 
reader.GetFieldValue(0).BlockNumber); - Assert.AreEqual(40000, reader.GetFieldValue(0).OffsetNumber); - Assert.AreEqual(expected.BlockNumber, reader.GetFieldValue(1).BlockNumber); - Assert.AreEqual(expected.OffsetNumber, reader.GetFieldValue(1).OffsetNumber); + Assert.That(reader.GetFieldValue(0).BlockNumber, Is.EqualTo(1234)); + Assert.That(reader.GetFieldValue(0).OffsetNumber, Is.EqualTo(40000)); + Assert.That(reader.GetFieldValue(1).BlockNumber, Is.EqualTo(expected.BlockNumber)); + Assert.That(reader.GetFieldValue(1).OffsetNumber, Is.EqualTo(expected.OffsetNumber)); } #region NpgsqlLogSequenceNumber / PgLsn - static readonly TestCaseData[] EqualsObjectCases = { + static readonly TestCaseData[] EqualsObjectCases = + [ new TestCaseData(new NpgsqlLogSequenceNumber(1ul), null).Returns(false), new TestCaseData(new NpgsqlLogSequenceNumber(1ul), new object()).Returns(false), new TestCaseData(new NpgsqlLogSequenceNumber(1ul), 1ul).Returns(false), // no implicit cast new TestCaseData(new NpgsqlLogSequenceNumber(1ul), "0/0").Returns(false), // no implicit cast/parsing - new TestCaseData(new NpgsqlLogSequenceNumber(1ul), new NpgsqlLogSequenceNumber(1ul)).Returns(true), - }; + new TestCaseData(new NpgsqlLogSequenceNumber(1ul), new NpgsqlLogSequenceNumber(1ul)).Returns(true) + ]; [Test, TestCaseSource(nameof(EqualsObjectCases))] public bool NpgsqlLogSequenceNumber_equals(NpgsqlLogSequenceNumber lsn, object? obj) @@ -77,7 +73,7 @@ public bool NpgsqlLogSequenceNumber_equals(NpgsqlLogSequenceNumber lsn, object? 
public async Task NpgsqlLogSequenceNumber() { var expected1 = new NpgsqlLogSequenceNumber(42949672971ul); - Assert.AreEqual(expected1, NpgsqlTypes.NpgsqlLogSequenceNumber.Parse("A/B")); + Assert.That(NpgsqlTypes.NpgsqlLogSequenceNumber.Parse("A/B"), Is.EqualTo(expected1)); await using var conn = await OpenConnectionAsync(); using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT 'A/B'::pg_lsn, @p::pg_lsn"; @@ -86,15 +82,13 @@ public async Task NpgsqlLogSequenceNumber() reader.Read(); var result1 = reader.GetFieldValue(0); var result2 = reader.GetFieldValue(1); - Assert.AreEqual(expected1, result1); - Assert.AreEqual(42949672971ul, (ulong)result1); - Assert.AreEqual("A/B", result1.ToString()); - Assert.AreEqual(expected1, result2); - Assert.AreEqual(42949672971ul, (ulong)result2); - Assert.AreEqual("A/B", result2.ToString()); + Assert.That(result1, Is.EqualTo(expected1)); + Assert.That((ulong)result1, Is.EqualTo(42949672971ul)); + Assert.That(result1.ToString(), Is.EqualTo("A/B")); + Assert.That(result2, Is.EqualTo(expected1)); + Assert.That((ulong)result2, Is.EqualTo(42949672971ul)); + Assert.That(result2.ToString(), Is.EqualTo("A/B")); } #endregion NpgsqlLogSequenceNumber / PgLsn - - public InternalTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs new file mode 100644 index 0000000000..a3e68838ac --- /dev/null +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -0,0 +1,440 @@ +using System; +using System.Text.Json; +using System.Text.Json.Serialization; +using System.Threading.Tasks; +using Npgsql.Properties; +using NUnit.Framework; + +namespace Npgsql.Tests.Types; + +[TestFixture("json")] +[TestFixture("jsonb")] +public class JsonDynamicTests : TestBase +{ + [Test] + public async Task As_poco() + => await AssertType( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly 
cloudy", + TemperatureC = 10 + }, + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); + + [Test] + public async Task As_poco_long() + { + using var conn = CreateConnection(); + var bigString = new string('x', Math.Max(conn.Settings.ReadBufferSize, conn.Settings.WriteBufferSize)); + + await AssertType( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = bigString, + TemperatureC = 10 + }, + // Warning: in theory jsonb order and whitespace may change across versions + IsJsonb + ? $$"""{"Date": "2019-09-01T00:00:00", "Summary": "{{bigString}}", "TemperatureC": 10}""" + : $$"""{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"{{bigString}}"}""", + PostgresType, + dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); + } + + [Test] + public async Task As_poco_supported_only_with_EnableDynamicJson() + { + // This test uses base.DataSource, which doesn't have EnableDynamicJson() + + var errorMessage = string.Format( + NpgsqlStrings.DynamicJsonNotEnabled, + nameof(WeatherForecast), + nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + PostgresType, + base.DataSource); + + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead( + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + base.DataSource); + + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task Poco_does_not_stomp_GetValue_string() + { + var dataSource = CreateDataSourceBuilder() + .EnableDynamicJson([typeof(WeatherForecast)], [typeof(WeatherForecast)]) + .Build(); + var sqlLiteral = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{(IsJsonb ? "jsonb" : "json")}", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + Assert.That(reader.GetValue(0), Is.TypeOf()); + } + + [Test] + public async Task Custom_JsonSerializerOptions() + { + await using var dataSource = CreateDataSourceBuilder() + .ConfigureJsonOptions(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }) + .EnableDynamicJson() + .Build(); + + await AssertTypeWrite( + dataSource, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? 
"""{"date": "2019-09-01T00:00:00", "summary": "Partly cloudy", "temperatureC": 10}""" + : """{"date":"2019-09-01T00:00:00","temperatureC":10,"summary":"Partly cloudy"}""", + PostgresType, + dataTypeInference: DataTypeInference.Nothing); + } + + [Test, Ignore("TODO We should not change the default type for json/jsonb, it makes little sense.")] + public async Task Poco_default_mapping() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + if (IsJsonb) + dataSourceBuilder.EnableDynamicJson(jsonbClrTypes: [typeof(WeatherForecast)]); + else + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(WeatherForecast)]); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + dataTypeInference: DataTypeInference.Nothing); + } + + #region Polymorphic + + [Test] + public async Task Poco_polymorphic_mapping() + { + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(WeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + } + + [Test] + public async Task Poco_polymorphic_mapping_read_parents() + { + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(WeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, + dataTypeInference: DataTypeInference.Nothing); + + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + valueTypeEqualsFieldType: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + } + + [Test] + public async Task Poco_exact_polymorphic_mapping() + { + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(ExtendedDerivedWeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + } + + [Test] + public async Task Poco_unspecified_polymorphic_mapping() + { + await using var dataSource = CreateDataSource(builder => + { + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + + // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + valueTypeEqualsFieldType: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + } + + [Test] + public async Task Poco_polymorphic_mapping_without_AllowOutOfOrderMetadataProperties() + { + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(WeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + + // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. + if (IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + valueTypeEqualsFieldType: false); + + // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + valueTypeEqualsFieldType: false); + + // We won't get the original value back for jsonb as we can't support polymorphism without also enforcing AllowOutOfOrderMetadataProperties is true. + // If we output $type, jsonb won't have that at the start and STJ will throw due to it appearing later in the object. So it's disabled entirely. 
+ if (!IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + } + + [Test] + public async Task Poco_unspecified_polymorphic_mapping_without_AllowOutOfOrderMetadataProperties() + { + await using var dataSource = CreateDataSource(builder => + { + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) + .EnableDynamicJson(); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + + // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. 
+ if (IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + valueTypeEqualsFieldType: false); + + // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + valueTypeEqualsFieldType: false); + + // We won't get the original value back for jsonb as we can't support polymorphism without also enforcing AllowOutOfOrderMetadataProperties is true. + // If we output $type, jsonb won't have that at the start and STJ will throw due to it appearing later in the object. So it's disabled entirely. + if (!IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); + } + + // ReSharper disable UnusedAutoPropertyAccessor.Local + // ReSharper disable UnusedMember.Local + [JsonDerivedType(typeof(ExtendedDerivedWeatherForecast), typeDiscriminator: "extended")] + record WeatherForecast + { + public DateTime Date { get; set; } + public int TemperatureC { get; set; } + public string Summary { get; set; } = ""; + } + + record DerivedWeatherForecast : WeatherForecast; + + record ExtendedDerivedWeatherForecast : DerivedWeatherForecast + { + public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); + } + // ReSharper restore UnusedMember.Local + // ReSharper restore UnusedAutoPropertyAccessor.Local + + #endregion Polymorphic + + public JsonDynamicTests(string dataTypeName) + { + DataSource = CreateDataSource(b => b.EnableDynamicJson()); + + if (dataTypeName == "jsonb") + using (var conn = OpenConnection()) + TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); + + PostgresType = dataTypeName; + } + + protected override 
NpgsqlDataSource DataSource { get; } + + [OneTimeTearDown] + protected void CleanUpDataSource() + { + DataSource.Dispose(); + } + + bool IsJsonb => PostgresType == "jsonb"; + string PostgresType { get; } +} diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index 3d068aa3d2..1c5f732bfd 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -1,20 +1,30 @@ -using System.Threading.Tasks; +using System.Data; +using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; -public class JsonPathTests : MultiplexingTestBase +public class JsonPathTests : TestBase { - public JsonPathTests(MultiplexingMode multiplexingMode) - : base(multiplexingMode) { } + static readonly object[] ReadWriteCases = + [ + new object[] { "'$'", "$" }, + new object[] { "'$\"varname\"'", "$\"varname\"" } + ]; - static readonly object[] ReadWriteCases = new[] + [Test] + [TestCase("$")] + [TestCase("$\"varname\"")] + public async Task JsonPath(string jsonPath) { - new object[] { "'$'", "$" }, - new object[] { "'$\"varname\"'", "$\"varname\"" }, - }; + using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "12.0", "The jsonpath type was introduced in PostgreSQL 12"); + await AssertType( + jsonPath, jsonPath, "jsonpath", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); + } [Test] [TestCaseSource(nameof(ReadWriteCases))] @@ -41,6 +51,6 @@ public async Task Write(string query, string expected) using var cmd = new NpgsqlCommand($"SELECT 'Passed' WHERE @p::text = {query}::text", conn) { Parameters = { new NpgsqlParameter("p", NpgsqlDbType.JsonPath) { Value = expected } } }; using var rdr = await cmd.ExecuteReaderAsync(); - Assert.True(rdr.Read()); + Assert.That(rdr.Read()); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/JsonTests.cs 
b/test/Npgsql.Tests/Types/JsonTests.cs index c2a309d4ff..a113cfe2cf 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -1,21 +1,23 @@ using System; +using System.Data; +using System.IO; using System.Text; using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Json)] -[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Jsonb)] -[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Json)] -[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Jsonb)] -public class JsonTests : MultiplexingTestBase +[TestFixture("json")] +[TestFixture("jsonb")] +public class JsonTests : TestBase { [Test] public async Task As_string() - => await AssertType(@"{""K"": ""V""}", @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefaultForWriting: false); + => await AssertType("""{"K": "V"}""", """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public async Task As_string_long() @@ -28,14 +30,16 @@ public async Task As_string_long() .Append(@"""}") .ToString(); - await AssertType(value, value, PostgresType, NpgsqlDbType, isDefaultForWriting: false); + await AssertType(value, value, + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); } [Test] public async Task As_string_with_GetTextReader() { await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($@"SELECT '{{""K"": ""V""}}'::{PostgresType}", conn); + await using var cmd = new NpgsqlCommand($$"""SELECT '{"K": "V"}'::{{PostgresType}}""", conn); await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); using var textReader = await reader.GetTextReaderAsync(0); @@ -44,72 +48,101 @@ public async Task As_string_with_GetTextReader() [Test] 
public async Task As_char_array() - => await AssertType(@"{""K"": ""V""}".ToCharArray(), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType("""{"K": "V"}""".ToCharArray(), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); [Test] public async Task As_bytes() - => await AssertType(Encoding.ASCII.GetBytes(@"{""K"": ""V""}"), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType("""{"K": "V"}"""u8.ToArray(), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary), valueTypeEqualsFieldType: false); + + [Test] + public async Task Write_as_ReadOnlyMemory_of_byte() + => await AssertTypeWrite(new ReadOnlyMemory("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary)); [Test] public async Task Write_as_ArraySegment_of_char() - => await AssertTypeWrite( - new ArraySegment(@"{""K"": ""V""}".ToCharArray()), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); + + [Test] + public Task As_MemoryStream() + => AssertTypeWrite(() => new MemoryStream("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary)); [Test] public async Task As_JsonDocument() => await AssertType( - JsonDocument.Parse(@"{""K"": ""V""}"), - IsJsonb ? @"{""K"": ""V""}" : @"{""K"":""V""}", + JsonDocument.Parse("""{"K": "V"}"""), + IsJsonb ? 
"""{"K": "V"}""" : """{"K":"V"}""", + PostgresType, + dataTypeInference: DataTypeInference.Mismatch, + comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString(), + valueTypeEqualsFieldType: false); + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5540")] + public async Task As_JsonDocument_with_null_root() + => await AssertType( + JsonDocument.Parse("null"), + "null", PostgresType, - NpgsqlDbType, - isDefault: false, - comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString()); + dataTypeInference: DataTypeInference.Mismatch, + comparer: (x, y) => x.RootElement.ValueKind == y.RootElement.ValueKind, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] - public async Task As_poco() + public async Task As_JsonElement_with_null_root() => await AssertType( - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - // Warning: in theory jsonb order and whitespace may change across versions - IsJsonb - ? 
@"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}" - : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""Partly cloudy""}", + JsonDocument.Parse("null").RootElement, + "null", PostgresType, - NpgsqlDbType, - isDefault: false); + dataTypeInference: DataTypeInference.Mismatch, + comparer: (x, y) => x.ValueKind == y.ValueKind, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] - public async Task As_poco_long() + public async Task As_JsonDocument_supported_only_with_SystemTextJson() { - using var conn = CreateConnection(); - var bigString = new string('x', Math.Max(conn.Settings.ReadBufferSize, conn.Settings.WriteBufferSize)); - - await AssertType( - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = bigString, - TemperatureC = 10 - }, - // Warning: in theory jsonb order and whitespace may change across versions - IsJsonb - ? @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": """ + bigString + @""", ""TemperatureC"": 10}" - : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""" + bigString + @"""}", + await using var slimDataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); + + await AssertTypeUnsupported( + JsonDocument.Parse("""{"K": "V"}"""), + """{"K": "V"}""", PostgresType, - NpgsqlDbType, - isDefault: false); + slimDataSource); } - record WeatherForecast - { - public DateTime Date { get; set; } - public int TemperatureC { get; set; } - public string Summary { get; set; } = ""; - } + [Test] + public Task Roundtrip_string() + => AssertType( + @"{""p"": 1}", + @"{""p"": 1}", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: true); + + [Test] + public Task Roundtrip_char_array() + => AssertType( + @"{""p"": 1}".ToCharArray(), + @"{""p"": 1}", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), 
valueTypeEqualsFieldType: false); + + [Test] + public Task Roundtrip_byte_array() + => AssertType( + @"{""p"": 1}"u8.ToArray(), + @"{""p"": 1}", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary), valueTypeEqualsFieldType: false); [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2811")] @@ -117,18 +150,18 @@ record WeatherForecast [IssueLink("https://github.com/npgsql/efcore.pg/issues/1082")] public async Task Can_read_two_json_documents() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); JsonDocument car; - using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""foo""}'::jsonb", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using (var cmd = new NpgsqlCommand("""SELECT '{"key" : "foo"}'::jsonb""", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); car = reader.GetFieldValue(0); } - using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""bar""}'::jsonb", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using (var cmd = new NpgsqlCommand("""SELECT '{"key" : "bar"}'::jsonb""", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); reader.GetFieldValue(0); @@ -137,15 +170,81 @@ public async Task Can_read_two_json_documents() Assert.That(car.RootElement.GetProperty("key").GetString(), Is.EqualTo("foo")); } - public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) - : base(multiplexingMode) + [Test] + public Task Roundtrip_JsonObject() + => AssertType( + new JsonObject { ["Bar"] = 8 }, + IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", + PostgresType, + // By default we map JsonObject to jsonb + dataTypeInference: IsJsonb ? 
DataTypeInference.Match : DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + public Task Roundtrip_JsonArray() + => AssertType( + new JsonArray { 1, 2, 3 }, + IsJsonb ? "[1, 2, 3]" : "[1,2,3]", + PostgresType, + // By default we map JsonArray to jsonb + dataTypeInference: IsJsonb ? DataTypeInference.Match : DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jsonobject_array_without_npgsqldbtype() + { + // By default we map JsonObject to jsonb + if (!IsJsonb) + return; + + await using var conn = await OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject1 = new JsonObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + var jsonObject2 = new JsonObject + { + { "name", "value2" }, + { "amount", 2 }, + { "unit", "g" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); + await cmd.ExecuteNonQueryAsync(); + } + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/6517")] + public Task Roundtrip_JsonNode() + => AssertType( + (JsonNode)new JsonObject { ["Bar"] = 8 }, + IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", + PostgresType, + // By default we map JsonNode to jsonb + dataTypeInference: IsJsonb ? 
DataTypeInference.Match : DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + public JsonTests(string dataTypeName) { - using (var conn = OpenConnection()) - TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); - NpgsqlDbType = npgsqlDbType; + if (dataTypeName == "jsonb") + using (var conn = OpenConnection()) + TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); + + PostgresType = dataTypeName; } - bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; - string PostgresType => IsJsonb ? "jsonb" : "json"; - readonly NpgsqlDbType NpgsqlDbType; + bool IsJsonb => PostgresType == "jsonb"; + string PostgresType { get; } } diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index 48f7d950c9..b47bc910f6 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -1,23 +1,61 @@ -using System.Threading.Tasks; +using System.Data; +using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -[NonParallelizable] -public class LTreeTests : MultiplexingTestBase +public class LTreeTests : TestBase { [Test] public Task LQuery() - => AssertType("Top.Science.*", "Top.Science.*", "lquery", NpgsqlDbType.LQuery, isDefaultForWriting: false); + => AssertType("Top.Science.*", "Top.Science.*", + "lquery", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public Task LTree() - => AssertType("Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", NpgsqlDbType.LTree, isDefaultForWriting: false); + => AssertType("Top.Science.Astronomy", "Top.Science.Astronomy", + "ltree", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public Task LTxtQuery() - => AssertType("Science & Astronomy", "Science & Astronomy", "ltxtquery", 
NpgsqlDbType.LTxtQuery, isDefaultForWriting: false); + => AssertType("Science & Astronomy", "Science & Astronomy", + "ltxtquery", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); + + [Test] + public async Task LTree_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var errorMessage = string.Format( + NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = + await AssertTypeUnsupportedRead>("Top.Science.Astronomy", "ltree", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedWrite("Top.Science.Astronomy", "ltree", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableLTree([Values] bool withArrays) + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableLTree(); + if (withArrays) + dataSourceBuilder.EnableArrays(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", + "ltree", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), + skipArrayCheck: !withArrays); + } [OneTimeSetUp] public async Task SetUp() @@ -26,6 +64,4 @@ public async Task SetUp() TestUtil.MinimumPgVersion(conn, "13.0"); await TestUtil.EnsureExtensionAsync(conn, "ltree"); } - - public LTreeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index f03444ca26..730188330e 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ 
b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -1,14 +1,12 @@ using System; using System.Data; using System.Threading.Tasks; -using Npgsql.TypeMapping; -using NpgsqlTypes; +using Npgsql.Internal.ResolverFactories; using NUnit.Framework; using static Npgsql.Util.Statics; namespace Npgsql.Tests.Types; -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing [NonParallelizable] public class LegacyDateTimeTests : TestBase { @@ -18,20 +16,41 @@ public Task Timestamp_with_all_DateTime_kinds([Values] DateTimeKind kind) new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), "1998-04-12 13:26:38.789", "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime); + dbType: DbType.DateTime); + + [Test] + public async Task Timestamp_read_as_Unspecified_DateTime() + { + await using var command = DataSource.CreateCommand("SELECT '2020-03-01T10:30:00'::timestamp"); + var dateTime = (DateTime)(await command.ExecuteScalarAsync())!; + Assert.That(dateTime.Kind, Is.EqualTo(DateTimeKind.Unspecified)); + } + + [Test] + public async Task Timestamptz_negative_infinity() + { + var dto = await AssertType(DateTimeOffset.MinValue, "-infinity", "timestamp with time zone", + dbType: DbType.DateTimeOffset, valueTypeEqualsFieldType: false); + Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); + } + + [Test] + public async Task Timestamptz_infinity() + { + var dto = await AssertType( + DateTimeOffset.MaxValue, "infinity", "timestamp with time zone", dbType: DbType.DateTimeOffset, + valueTypeEqualsFieldType: false); + Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); + } [Test] [TestCase(DateTimeKind.Utc, TestName = "Timestamptz_write_utc_DateTime_does_not_convert")] [TestCase(DateTimeKind.Unspecified, TestName = "Timestamptz_write_unspecified_DateTime_does_not_convert")] public Task Timestamptz_write_utc_DateTime_does_not_convert(DateTimeKind kind) => AssertTypeWrite( - new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), - "1998-04-12 15:26:38.789+02", 
- "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTimeOffset, - isDefault: false); + new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), "1998-04-12 15:26:38.789+02", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTimeOffset, DbType.DateTime)); [Test] public Task Timestamptz_local_DateTime_converts() @@ -41,29 +60,26 @@ public Task Timestamptz_local_DateTime_converts() var dateTime = new DateTime(1998, 4, 12, 13, 26, 38, 789, DateTimeKind.Utc).ToLocalTime(); return AssertType( - dateTime, - "1998-04-12 15:26:38.789+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTimeOffset, - isDefaultForWriting: false); + dateTime, "1998-04-12 15:26:38.789+02", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTimeOffset, DbType.DateTime)); } - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) - { - var conn = await base.OpenConnectionAsync(connectionString); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; - } - - protected override NpgsqlConnection OpenConnection(string? connectionString = null) - => throw new NotSupportedException(); + NpgsqlDataSource _dataSource = null!; + protected override NpgsqlDataSource DataSource => _dataSource; [OneTimeSetUp] public void Setup() { #if DEBUG LegacyTimestampBehavior = true; + _dataSource = CreateDataSource(builder => + { + // Can't use the static AdoTypeInfoResolver instance, it already captured the feature flag. 
+ builder.AddTypeInfoResolverFactory(new AdoTypeInfoResolverFactory()); + builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + }); + NpgsqlDataSourceBuilder.ResetGlobalMappings(overwrite: true); #else Assert.Ignore( "Legacy DateTime tests rely on the Npgsql.EnableLegacyTimestampBehavior AppContext switch and can only be run in DEBUG builds"); @@ -72,6 +88,11 @@ public void Setup() #if DEBUG [OneTimeTearDown] - public void Teardown() => LegacyTimestampBehavior = false; + public void Teardown() + { + LegacyTimestampBehavior = false; + _dataSource.Dispose(); + NpgsqlDataSourceBuilder.ResetGlobalMappings(overwrite: true); + } #endif } diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index f0ab30fb47..a047fce9b2 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using NpgsqlTypes; @@ -9,13 +9,16 @@ namespace Npgsql.Tests.Types; /// /// Tests on PostgreSQL types which don't fit elsewhere /// -class MiscTypeTests : MultiplexingTestBase +class MiscTypeTests : TestBase { [Test] public async Task Boolean() { - await AssertType(true, "true", "boolean", NpgsqlDbType.Boolean, DbType.Boolean); - await AssertType(false, "false", "boolean", NpgsqlDbType.Boolean, DbType.Boolean); + await AssertType(true, "true", "boolean", dbType: DbType.Boolean, skipArrayCheck: true); + await AssertType(false, "false", "boolean", dbType: DbType.Boolean, skipArrayCheck: true); + + // The literal representations for bools inside array are different ({t,f} instead of true/false, so we check separately. 
+ await AssertType(new[] { true, false }, "{t,f}", "boolean[]"); } [Test] @@ -23,7 +26,7 @@ public Task Uuid() => AssertType( new Guid("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11"), "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", - "uuid", NpgsqlDbType.Uuid, DbType.Guid); + "uuid", dbType: DbType.Guid); [Test, Description("Makes sure that the PostgreSQL 'unknown' type (OID 705) is read properly")] public async Task Read_unknown() @@ -47,7 +50,7 @@ public async Task Null() { cmd.Parameters.AddWithValue("p1", DBNull.Value); cmd.Parameters.Add(new NpgsqlParameter("p2", null)); - cmd.Parameters.Add(new NpgsqlParameter("p3", DBNull.Value)); + cmd.Parameters.Add(new NpgsqlParameter("p3", DBNull.Value)); await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); @@ -58,81 +61,21 @@ public async Task Null() } } - // Setting non-generic NpgsqlParameter.Value is not allowed, only DBNull.Value - await using (var cmd = new NpgsqlCommand("SELECT @p::TEXT", conn)) + // Setting non-generic NpgsqlParameter.Value to null is not allowed, only DBNull.Value + await using (var cmd = new NpgsqlCommand("SELECT @p4::TEXT", conn)) { cmd.Parameters.AddWithValue("p4", NpgsqlDbType.Text, null!); - Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); + Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); } - } - - #region Record - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/724")] - [IssueLink("https://github.com/npgsql/npgsql/issues/1980")] - public async Task Read_Record_as_object_array() - { - var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - var record = (object[])reader[0]; - Assert.That(record[0], Is.EqualTo(1)); - Assert.That(record[1], Is.EqualTo("foo")); - - var 
array = (object[][])reader[1]; - Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); - } - - [Test] - public async Task Read_Record_as_ValueTuple() - { - var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - var record = reader.GetFieldValue<(int, string)>(0); - Assert.That(record.Item1, Is.EqualTo(1)); - Assert.That(record.Item2, Is.EqualTo("foo")); - - var array = (object[][])reader[1]; - Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); - } - - [Test] - public async Task Read_Record_as_Tuple() - { - var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - var record = reader.GetFieldValue>(0); - Assert.That(record.Item1, Is.EqualTo(1)); - Assert.That(record.Item2, Is.EqualTo("foo")); - - var array = (object[][])reader[1]; - Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); + // Setting generic NpgsqlParameter.Value to null is not allowed, only DBNull.Value + await using (var cmd = new NpgsqlCommand("SELECT @p4::TEXT", conn)) + { + cmd.Parameters.Add(new NpgsqlParameter("p4", NpgsqlDbType.Text) { Value = null! 
}); + Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); + } } - [Test] - public Task Write_Record_is_not_supported() - => AssertTypeUnsupportedWrite(new object[] { 1, "foo" }, "record"); - - #endregion Record - [Test, Description("Makes sure that setting DbType.Object makes Npgsql infer the type")] [IssueLink("https://github.com/npgsql/npgsql/issues/694")] public async Task DbType_causes_inference() @@ -160,16 +103,28 @@ public async Task AllResultTypesAreUnknown() [Test, Description("Mixes and matches an unknown type with a known type")] public async Task UnknownResultTypeList() { - if (IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT TRUE, 8", conn); - cmd.UnknownResultTypeList = new[] { true, false }; + cmd.UnknownResultTypeList = [true, false]; await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(string))); Assert.That(reader.GetString(0), Is.EqualTo("t")); + Assert.That(reader.GetValue(0), Is.EqualTo("t")); + Assert.That(reader.GetFieldValue(0), Is.EqualTo("t")); + + // Try some alternative text types + Assert.That(reader.GetFieldValue(0), Is.EqualTo("t")); + Assert.That(reader.GetFieldValue(0), Is.EqualTo("t")); + + // Try as async + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + + // Normal binary column Assert.That(reader.GetInt32(1), Is.EqualTo(8)); } @@ -210,29 +165,42 @@ public async Task Send_unknown() #endregion + + [Test] + public async Task ObjectArray() + { + await AssertTypeWrite(new object?[] { (short)4, null, (long)5, 6 }, "{4,NULL,5,6}", + "integer[]", dataTypeInference: DataTypeInference.Nothing); + await AssertTypeWrite(new object?[] { 
"text", null, DBNull.Value, "chars".ToCharArray(), 'c' }, "{text,NULL,NULL,chars,c}", + "text[]", dataTypeInference: DataTypeInference.Nothing); + + await using var dataSource = CreateDataSource(b => b.ConnectionStringBuilder.Timezone = "Europe/Berlin"); + await AssertTypeWrite(dataSource, new object?[] { DateTime.UnixEpoch, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) }, + "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", + "timestamp with time zone[]", dataTypeInference: DataTypeInference.Nothing); + Assert.ThrowsAsync(() => AssertTypeWrite(dataSource, new object?[] + { + DateTime.Now, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) + }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", + dataTypeInference: DataTypeInference.Nothing)); + } + [Test] public Task Int2Vector() - => AssertType(new short[] { 4, 5, 6 }, "4 5 6", "int2vector", NpgsqlDbType.Int2Vector, isDefault: false); + => AssertType(new short[] { 4, 5, 6 }, "4 5 6", + "int2vector", dataTypeInference: DataTypeInference.Mismatch, + // int2vector mappings require a data type name, so passing a value of type short[][] will result in no mapping. 
+ skipArrayCheck: true); [Test] public Task Oidvector() - => AssertType(new uint[] { 4, 5, 6 }, "4 5 6", "oidvector", NpgsqlDbType.Oidvector, isDefault: false); + => AssertType(new uint[] { 4, 5, 6 }, "4 5 6", + "oidvector", dataTypeInference: DataTypeInference.Nothing); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1138")] public async Task Void() { await using var conn = await OpenConnectionAsync(); - Assert.That(await conn.ExecuteScalarAsync("SELECT pg_sleep(0)"), Is.SameAs(DBNull.Value)); + Assert.That(await conn.ExecuteScalarAsync("SELECT pg_sleep(0)"), Is.SameAs(null)); } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1364")] - public async Task Unsupported_DbType() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - Assert.That(() => cmd.Parameters.Add(new NpgsqlParameter("p", DbType.UInt32) { Value = 8u }), - Throws.Exception.TypeOf()); - } - - public MiscTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/MoneyTests.cs b/test/Npgsql.Tests/Types/MoneyTests.cs index 8aceb03dac..a0bf7f1e57 100644 --- a/test/Npgsql.Tests/Types/MoneyTests.cs +++ b/test/Npgsql.Tests/Types/MoneyTests.cs @@ -1,4 +1,3 @@ -using System; using System.Data; using System.Threading.Tasks; using NpgsqlTypes; @@ -8,8 +7,8 @@ namespace Npgsql.Tests.Types; public class MoneyTests : TestBase { - static readonly object[] MoneyValues = new[] - { + static readonly object[] MoneyValues = + [ new object[] { "$1.22", 1.22M }, new object[] { "$1,000.22", 1000.22M }, new object[] { "$1,000,000.22", 1000000.22M }, @@ -19,8 +18,8 @@ public class MoneyTests : TestBase new object[] { "$92,233,720,368,547,758.07", +92233720368547758.07M }, new object[] { "-$92,233,720,368,547,758.08", -92233720368547758.08M }, - new object[] { "-$92,233,720,368,547,758.08", -92233720368547758.08M }, - }; + new object[] { "-$92,233,720,368,547,758.08", 
-92233720368547758.08M } + ]; [Test] [TestCaseSource(nameof(MoneyValues))] @@ -28,7 +27,9 @@ public async Task Money(string sqlLiteral, decimal money) { using var conn = await OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET lc_monetary='C'"); - await AssertType(conn, money, sqlLiteral, "money", NpgsqlDbType.Money, DbType.Currency, isDefault: false); + await AssertType(conn, money, sqlLiteral, + "money", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Currency, DbType.Decimal)); } [Test] @@ -42,11 +43,11 @@ public async Task Non_decimal_types_are_not_supported() await AssertTypeUnsupportedRead("8", "money"); } - static readonly object[] WriteWithLargeScaleCases = new[] - { + static readonly object[] WriteWithLargeScaleCases = + [ new object[] { "0.004::money", 0.004M, 0.00M }, - new object[] { "0.005::money", 0.005M, 0.01M }, - }; + new object[] { "0.005::money", 0.005M, 0.01M } + ]; [Test] [TestCaseSource(nameof(WriteWithLargeScaleCases))] @@ -60,4 +61,4 @@ public async Task Write_with_large_scale(string query, decimal parameter, decima Assert.That(decimal.GetBits(rdr.GetFieldValue(0)), Is.EqualTo(decimal.GetBits(expected))); Assert.That(rdr.GetFieldValue(1)); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 9479e72eff..9bf53bf528 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -1,6 +1,8 @@ using System; using System.Collections.Generic; +using System.Data; using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -9,189 +11,162 @@ namespace Npgsql.Tests.Types; public class MultirangeTests : TestBase { - [Test] - public async Task Read() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT '{[3,7), (8,]}'::int4multirange", conn); - await using var 
reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4multirange")); - - var multirangeArray = (NpgsqlRange[])reader[0]; - Assert.That(multirangeArray.Length, Is.EqualTo(2)); - Assert.That(multirangeArray[0], Is.EqualTo(new NpgsqlRange(3, true, false, 7, false, false))); - Assert.That(multirangeArray[1], Is.EqualTo(new NpgsqlRange(9, true, false, 0, false, true))); - - var multirangeList = reader.GetFieldValue>>(0); - Assert.That(multirangeList.Count, Is.EqualTo(2)); - Assert.That(multirangeList[0], Is.EqualTo(new NpgsqlRange(3, true, false, 7, false, false))); - Assert.That(multirangeList[1], Is.EqualTo(new NpgsqlRange(9, true, false, 0, false, true))); - } - - [Test] - public async Task Write() - { - var multirangeArray = new NpgsqlRange[] - { - new(3, true, false, 7, false, false), - new(8, false, false, 0, false, true) - }; - - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async Task WriteInternal(IList> multirange) - { - await conn.ReloadTypesAsync(); - cmd.Parameters.Add(new() { Value = multirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),[9,)}")); - - await conn.ReloadTypesAsync(); - cmd.Parameters[0] = new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.IntegerMultirange }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),[9,)}")); - - await conn.ReloadTypesAsync(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "int4multirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),[9,)}")); - } - } + static readonly TestCaseData[] MultirangeTestCases = + [ + // int4multirange + new TestCaseData( + new NpgsqlRange[] + { + new(3, true, false, 7, false, false), + new(9, true, false, 0, false, true) + 
}, + "{[3,7),[9,)}", "int4multirange", DataTypeInference.Match, true, default(NpgsqlRange)) + .SetName("Int"), + + // int8multirange + new TestCaseData( + new NpgsqlRange[] + { + new(3, true, false, 7, false, false), + new(9, true, false, 0, false, true) + }, + "{[3,7),[9,)}", "int8multirange", DataTypeInference.Match, true, default(NpgsqlRange)) + .SetName("Long"), + + // nummultirange + // numeric is non-discrete so doesn't undergo normalization, use that to test bound scenarios which otherwise get normalized + new TestCaseData( + new NpgsqlRange[] + { + new(3, true, false, 7, true, false), + new(9, false, false, 0, false, true) + }, + "{[3,7],(9,)}", "nummultirange", DataTypeInference.Match, true, default(NpgsqlRange)) + .SetName("Decimal"), + + // daterange + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), + new(new(2020, 1, 10), true, false, default, false, true) + }, + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", DataTypeInference.Match, true, default(NpgsqlRange)) + .SetName("DateTime DateMultirange"), + + // tsmultirange + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), + new(new(2020, 1, 10), true, false, default, false, true) + }, + """{["2020-01-01 00:00:00","2020-01-05 00:00:00"),["2020-01-10 00:00:00",)}""", "tsmultirange", DataTypeInference.Match, true, default(NpgsqlRange)) + .SetName("DateTime TimestampMultirange"), + + // tstzmultirange + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1, 0, 0, 0, kind: DateTimeKind.Utc), true, false, new(2020, 1, 5, 0, 0, 0, kind: DateTimeKind.Utc), false, false), + new(new(2020, 1, 10, 0, 0, 0, kind: DateTimeKind.Utc), true, false, default, false, true) + }, + """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", DataTypeInference.Match, true, default(NpgsqlRange)) + .SetName("DateTime TimestampTzMultirange"), 
+ + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), + new(new(2020, 1, 10), true, false, default, false, true) + }, + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", DataTypeInference.Mismatch, true, default(NpgsqlRange)) + .SetName("DateOnly") + ]; + + [Test, TestCaseSource(nameof(MultirangeTestCases))] + public Task Multirange_as_array( + T multirangeAsArray, string sqlLiteral, string dataTypeName, DataTypeInference datatypeDataTypeInference, bool valueTypeEqualsFieldType, TRange _) + => AssertType(multirangeAsArray, sqlLiteral, dataTypeName, + dataTypeInference: datatypeDataTypeInference, valueTypeEqualsFieldType: valueTypeEqualsFieldType); + + [Test, TestCaseSource(nameof(MultirangeTestCases))] + public Task Multirange_as_list( + T multirangeAsArray, string sqlLiteral, string dataTypeName, DataTypeInference datatypeDataTypeInference, bool valueTypeEqualsFieldType, TRange _) + where T : IList + => AssertType( + new List(multirangeAsArray), sqlLiteral, dataTypeName, + dataTypeInference: datatypeDataTypeInference, valueTypeEqualsFieldType: false); [Test] - public async Task Write_nummultirange() + public async Task Unmapped_multirange_with_mapped_subtype() { - var multirangeArray = new NpgsqlRange[] - { - new(3, true, false, 7, false, false), - new(8, false, false, 0, false, true) - }; - - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async Task WriteInternal(IList> multirange) - { - conn.ReloadTypes(); - cmd.Parameters.Add(new() { Value = multirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),(8,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.NumericMultirange }; - Assert.That(await 
cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),(8,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "nummultirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),(8,)}")); - } - } - - [Test] - public async Task Read_Datemultirange() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT '{[2020-01-01,2020-01-05), (2020-01-10,]}'::datemultirange", conn); - await using var reader = await cmd.ExecuteReaderAsync(); + await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes().ConnectionStringBuilder.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + + var typeName = await GetTempTypeName(conn); + await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); + conn.ReloadTypes(); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + + var value = new[] {new NpgsqlRange( + new string('a', conn.Settings.WriteBufferSize + 10).ToCharArray(), + new string('z', conn.Settings.WriteBufferSize + 10).ToCharArray() + )}; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { DataTypeName = typeName + "_multirange", ParameterName = "p", Value = value }); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); await reader.ReadAsync(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("datemultirange")); - - var multirangeDateTimeArray = (NpgsqlRange[])reader[0]; - Assert.That(multirangeDateTimeArray.Length, Is.EqualTo(2)); - Assert.That(multirangeDateTimeArray[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateTimeArray[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); - - var multirangeDateTimeList = reader.GetFieldValue>>(0); - Assert.That(multirangeDateTimeList.Count, 
Is.EqualTo(2)); - Assert.That(multirangeDateTimeList[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateTimeList[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); - -#if NET6_0_OR_GREATER - var multirangeDateOnlyArray = reader.GetFieldValue[]>(0); - Assert.That(multirangeDateOnlyArray.Length, Is.EqualTo(2)); - Assert.That(multirangeDateOnlyArray[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateOnlyArray[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); - - var multirangeDateOnlyList = reader.GetFieldValue>>(0); - Assert.That(multirangeDateOnlyList.Count, Is.EqualTo(2)); - Assert.That(multirangeDateOnlyList[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateOnlyList[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); -#endif + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(NpgsqlRange[]))); + var result = reader.GetFieldValue[]>(0); + Assert.That(result, Is.EqualTo(value).Using[]>((actual, expected) => + actual[0].LowerBound!.SequenceEqual(expected[0].LowerBound!) 
&& actual[0].UpperBound!.SequenceEqual(expected[0].UpperBound!))); } -#if NET6_0_OR_GREATER [Test] - public async Task Write_Datemultirange_DateOnly() + public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() { - var multirangeArray = new NpgsqlRange[] - { - new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), - new(new(2020, 1, 10), false, false, default, false, true) - }; - - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async Task WriteInternal(IList> multirange) - { - conn.ReloadTypes(); - cmd.Parameters.Add(new() { Value = multirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.DateMultirange }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "datemultirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - } + await using var connection = await DataSource.OpenConnectionAsync(); + var rangeType = await GetTempTypeName(connection); + var multirangeTypeName = rangeType + "_multirange"; + await connection.ExecuteNonQueryAsync($"CREATE TYPE {rangeType} AS RANGE(subtype=text)"); + await connection.ReloadTypesAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite( + new NpgsqlRange[] + { + new("bar", "foo"), + new("moo", "zoo"), + }, + multirangeTypeName); + 
Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead("""{["bar","foo"],["moo","zoo"]}""", + multirangeTypeName); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead>( + """{["bar","foo"],["moo","zoo"]}""", + multirangeTypeName); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } -#endif - - [Test] - public async Task Write_Datemultirange_DateTime() - { - var multirangeArray = new NpgsqlRange[] - { - new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), - new(new(2020, 1, 10), false, false, default, false, true) - }; - var multirangeList = new List>(multirangeArray); + protected override NpgsqlDataSource DataSource { get; } - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async Task WriteInternal(IList> multirange) + public MultirangeTests() => DataSource = CreateDataSource(builder => { - conn.ReloadTypes(); - cmd.Parameters.Add(new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.DateMultirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "datemultirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - } - } + builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + }); [OneTimeSetUp] public async Task Setup() @@ -199,4 +174,7 @@ public async Task Setup() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange 
types were introduced in PostgreSQL 14"); } + + [OneTimeTearDown] + public void TearDown() => DataSource.Dispose(); } diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index 5e7de43989..ffecfe3247 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Net; using System.Net.NetworkInformation; using System.Threading.Tasks; @@ -13,11 +13,21 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-net-types.html /// -class NetworkTypeTests : MultiplexingTestBase +class NetworkTypeTests : TestBase { [Test] public Task Inet_v4_as_IPAddress() - => AssertType(IPAddress.Parse("192.168.1.1"), "192.168.1.1/32", "inet", NpgsqlDbType.Inet); + => AssertType(IPAddress.Parse("192.168.1.1"), "192.168.1.1/32", "inet", skipArrayCheck: true); + + [Test] + public Task Inet_v4_array_as_IPAddress_array() + => AssertType( + new[] + { + IPAddress.Parse("192.168.1.1"), + IPAddress.Parse("192.168.1.2") + }, + "{192.168.1.1,192.168.1.2}", "inet[]"); [Test] public Task Inet_v6_as_IPAddress() @@ -25,43 +35,46 @@ public Task Inet_v6_as_IPAddress() IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), "2001:1db8:85a3:1142:1000:8a2e:1370:7334/128", "inet", - NpgsqlDbType.Inet); + skipArrayCheck: true); [Test] - public Task Inet_v4_as_tuple() - => AssertType((IPAddress.Parse("192.168.1.1"), 24), "192.168.1.1/24", "inet", NpgsqlDbType.Inet, isDefaultForReading: false); - - [Test] - public Task Inet_v6_as_tuple() + public Task Inet_v6_array_as_IPAddress_array() => AssertType( - (IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24), - "2001:1db8:85a3:1142:1000:8a2e:1370:7334/24", - "inet", - NpgsqlDbType.Inet, - isDefaultForReading: false); + new[] + { + IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), + IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7335") + }, + 
"{2001:1db8:85a3:1142:1000:8a2e:1370:7334,2001:1db8:85a3:1142:1000:8a2e:1370:7335}", "inet[]"); [Test, IssueLink("https://github.com/dotnet/corefx/issues/33373")] public Task IPAddress_Any() - => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", NpgsqlDbType.Inet); + => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", skipArrayCheck: true); [Test] - public Task Cidr() + public Task IPNetwork_as_cidr() => AssertType( - (Address: IPAddress.Parse("192.168.1.0"), Subnet: 24), + new IPNetwork(IPAddress.Parse("192.168.1.0"), 24), + "192.168.1.0/24", + "cidr"); + +#pragma warning disable CS0618 // NpgsqlCidr is obsolete + [Test] + public Task NpgsqlCidr_as_Cidr() + => AssertType( + new NpgsqlCidr(IPAddress.Parse("192.168.1.0"), netmask: 24), "192.168.1.0/24", "cidr", - NpgsqlDbType.Cidr, - isDefaultForWriting: false); + valueTypeEqualsFieldType: false); +#pragma warning restore CS0618 -#pragma warning disable 618 // For NpgsqlInet [Test] public Task Inet_v4_as_NpgsqlInet() => AssertType( new NpgsqlInet(IPAddress.Parse("192.168.1.1"), 24), "192.168.1.1/24", "inet", - NpgsqlDbType.Inet, - isDefaultForReading: false); + valueTypeEqualsFieldType: false); [Test] public Task Inet_v6_as_NpgsqlInet() @@ -69,13 +82,11 @@ public Task Inet_v6_as_NpgsqlInet() new NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24), "2001:1db8:85a3:1142:1000:8a2e:1370:7334/24", "inet", - NpgsqlDbType.Inet, - isDefaultForReading: false); -#pragma warning restore 618 // For NpgsqlInet + valueTypeEqualsFieldType: false); [Test] public Task Macaddr() - => AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:01:02:03", "macaddr", NpgsqlDbType.MacAddr); + => AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:01:02:03", "macaddr"); [Test] public async Task Macaddr8() @@ -84,8 +95,8 @@ public async Task Macaddr8() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - await 
AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "08:00:2b:01:02:03:04:05", "macaddr8", NpgsqlDbType.MacAddr8, - isDefaultForWriting: false); + await AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "08:00:2b:01:02:03:04:05", + "macaddr8", dataTypeInference: DataTypeInference.Mismatch); } [Test] @@ -95,8 +106,8 @@ public async Task Macaddr8_write_with_6_bytes() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - await AssertTypeWrite(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:ff:fe:01:02:03", "macaddr8", NpgsqlDbType.MacAddr8, - isDefault: false); + await AssertTypeWrite(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:ff:fe:01:02:03", + "macaddr8", dataTypeInference: DataTypeInference.Mismatch); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/835")] @@ -120,11 +131,6 @@ public async Task Macaddr_write_validation() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - var exception = await AssertTypeUnsupportedWrite( - PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "macaddr"); - - Assert.That(exception.Message, Does.StartWith("22P03:").And.Contain("1")); + await AssertTypeUnsupportedWrite(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "macaddr"); } - - public NetworkTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index d184221e31..c73617f819 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -1,17 +1,16 @@ -using System; +using System; using System.Data; using System.Linq; using System.Numerics; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -public class NumericTests : MultiplexingTestBase +public class NumericTests : 
TestBase { - static readonly object[] ReadWriteCases = new[] - { + static readonly object[] ReadWriteCases = + [ new object[] { "0.0000000000000000000000000001::numeric", 0.0000000000000000000000000001M }, new object[] { "0.000000000000000000000001::numeric", 0.000000000000000000000001M }, new object[] { "0.00000000000000000001::numeric", 0.00000000000000000001M }, @@ -19,6 +18,7 @@ public class NumericTests : MultiplexingTestBase new object[] { "0.000000000001::numeric", 0.000000000001M }, new object[] { "0.00000001::numeric", 0.00000001M }, new object[] { "0.0001::numeric", 0.0001M }, + new object[] { "0.123456000000000100000000::numeric", 0.123456000000000100000000M }, new object[] { "1::numeric", 1M }, new object[] { "10000::numeric", 10000M }, new object[] { "100000000::numeric", 100000000M }, @@ -44,6 +44,7 @@ public class NumericTests : MultiplexingTestBase new object[] { "1E+24::numeric", 1000000000000000000000000M }, new object[] { "1E+28::numeric", 10000000000000000000000000000M }, + new object[] { "1.2222333344445555666677778888::numeric", 1.2222333344445555666677778888M }, new object[] { "11.222233334444555566667777888::numeric", 11.222233334444555566667777888M }, new object[] { "111.22223333444455556666777788::numeric", 111.22223333444455556666777788M }, new object[] { "1111.2222333344445555666677778::numeric", 1111.2222333344445555666677778M }, @@ -74,14 +75,16 @@ public class NumericTests : MultiplexingTestBase // Bug 2033 new object[] { "0.0036882500000000000000000000", 0.0036882500000000000000000000M }, + // Bug 5848 + new object[] { "10836968.715000000000000000000000", 10836968.715000000000000000000000M }, new object[] { "936490726837837729197", 936490726837837729197M }, new object[] { "9364907268378377291970000", 9364907268378377291970000M }, new object[] { "3649072683783772919700000000", 3649072683783772919700000000M }, new object[] { "1234567844445555.000000000", 1234567844445555.000000000M }, new object[] { "11112222000000000000", 
11112222000000000000M }, - new object[] { "0::numeric", 0M }, - }; + new object[] { "0::numeric", 0M } + ]; [Test] [TestCaseSource(nameof(ReadWriteCases))] @@ -89,9 +92,8 @@ public async Task Read(string query, decimal expected) { using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT " + query, conn); - Assert.That( - decimal.GetBits((decimal)(await cmd.ExecuteScalarAsync())!), - Is.EqualTo(decimal.GetBits(expected))); + var value = (decimal)(await cmd.ExecuteScalarAsync())!; + Assert.That(decimal.GetBits(value), Is.EqualTo(decimal.GetBits(expected))); } [Test] @@ -111,15 +113,21 @@ public async Task Write(string query, decimal expected) [Test] public async Task Numeric() { - await AssertType(5.5m, "5.5", "numeric", NpgsqlDbType.Numeric, DbType.Decimal); - await AssertTypeWrite(5.5m, "5.5", "numeric", NpgsqlDbType.Numeric, DbType.VarNumeric, inferredDbType: DbType.Decimal); - - await AssertType((short)8, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType((byte)8, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8F, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8D, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8M, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); + await AssertType(5.5m, "5.5", "numeric", dbType: DbType.Decimal); + await AssertTypeWrite(5.5m, "5.5", "numeric", dbType: new(DbType.Decimal, DbType.Decimal, DbType.VarNumeric)); + + await AssertType((short)8, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int32), 
valueTypeEqualsFieldType: false); + await AssertType(8L, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int64), valueTypeEqualsFieldType: false); + await AssertType((byte)8, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int16), valueTypeEqualsFieldType: false, skipArrayCheck: true); + await AssertType(8F, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Double), valueTypeEqualsFieldType: false); } [Test, Description("Tests that when Numeric value does not fit in a System.Decimal and reader is in ReaderState.InResult, the value was read wholly and it is safe to continue reading")] @@ -150,15 +158,19 @@ public async Task Read_overflow_is_safe() [TestCaseSource(nameof(ReadWriteCases))] public async Task Read_BigInteger(string query, decimal expected) { + var bigInt = new BigInteger(expected); + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT " + query, conn); + using var rdr = await cmd.ExecuteReaderAsync(); + await rdr.ReadAsync(); + if (decimal.Floor(expected) == expected) - { - var bigInt = new BigInteger(expected); - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT " + query, conn); - using var rdr = await cmd.ExecuteReaderAsync(); - await rdr.ReadAsync(); Assert.That(rdr.GetFieldValue(0), Is.EqualTo(bigInt)); - } + else + Assert.That(() => rdr.GetFieldValue(0), + Throws.Exception + .With.TypeOf() + .With.Message.EqualTo("Numeric value with non-zero fractional digits not supported by BigInteger")); } [Test] @@ -191,5 +203,32 @@ public async Task BigInteger_large() Assert.That(rdr.GetFieldValue(1), Is.EqualTo(num)); } - public NumericTests(MultiplexingMode 
multiplexingMode) : base(multiplexingMode) {} + [Test] + public async Task NumericZero_WithScale() + { + // Scale should not be lost when dealing with 0 + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p", conn); + var param = new NpgsqlParameter("p", DbType.Decimal, 10, null, ParameterDirection.Input, false, 10, 2, DataRowVersion.Default, 0.00M); + cmd.Parameters.Add(param); + using var rdr = await cmd.ExecuteReaderAsync(); + await rdr.ReadAsync(); + var value = rdr.GetFieldValue(0); + + Assert.That(value.Scale, Is.EqualTo(2)); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6383")] + public async Task Read_Many_Numerics_As_BigInteger([Values(CommandBehavior.Default, CommandBehavior.SequentialAccess)] CommandBehavior behavior) + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 1234567890::numeric FROM generate_series(1, 8000)"; + + await using var reader = await cmd.ExecuteReaderAsync(behavior); + while (await reader.ReadAsync()) + { + Assert.DoesNotThrowAsync(async () => await reader.GetFieldValueAsync(0)); + } + } } diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index 9c5c13c027..795efc16ce 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -1,11 +1,8 @@ -using System; -using System.Collections.Generic; +using System; using System.Data; using System.Globalization; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; -using NUnit.Framework.Internal; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; @@ -16,62 +13,87 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-numeric.html /// -public class NumericTypeTests : MultiplexingTestBase +public class NumericTypeTests : TestBase { [Test] public async Task Int16() { - await 
AssertType((short)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16); - - await AssertType(8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8L, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType((byte)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8F, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8D, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8M, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); + await AssertType((short)8, "8", "smallint", dbType: DbType.Int16); + // Clr byte/sbyte maps to 'int2' as there is no byte type in PostgreSQL, byte[] maps to bytea however. + await AssertType((byte)8, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.Int16, valueTypeEqualsFieldType: false, skipArrayCheck: true); + await AssertType((sbyte)8, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.Int16, valueTypeEqualsFieldType: false); + + await AssertType(8, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Int32), valueTypeEqualsFieldType: false); + await AssertType(8L, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Int64), valueTypeEqualsFieldType: false); + await AssertType(8F, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Double), valueTypeEqualsFieldType: false); + await AssertType(8M, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Decimal), valueTypeEqualsFieldType: false); } [Test] public 
async Task Int32() { - await AssertType(8, "8", "integer", NpgsqlDbType.Integer, DbType.Int32); - - await AssertType((short)8, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8L, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType((byte)8, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8F, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8D, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8M, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); + await AssertType(8, "8", "integer", dbType: DbType.Int32); + + await AssertType((short)8, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8L, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int64), valueTypeEqualsFieldType: false); + await AssertType((byte)8, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int16), valueTypeEqualsFieldType: false, skipArrayCheck: true); // byte[] maps to bytea + await AssertType((sbyte)8, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8F, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Double), valueTypeEqualsFieldType: false); + await AssertType(8M, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Decimal), valueTypeEqualsFieldType: false); } [Test, Description("Tests some types which are 
aliased to UInt32")] - [TestCase("oid", NpgsqlDbType.Oid, TestName="OID")] - [TestCase("xid", NpgsqlDbType.Xid, TestName="XID")] - [TestCase("cid", NpgsqlDbType.Cid, TestName="CID")] - public Task UInt32(string pgTypeName, NpgsqlDbType npgsqlDbType) - => AssertType(8u, "8", pgTypeName, npgsqlDbType, isDefaultForWriting: false); + [TestCase("oid", TestName="OID")] + [TestCase("xid", TestName="XID")] + [TestCase("cid", TestName="CID")] + public Task UInt32(string dataTypeName) + => AssertType(8u, "8", dataTypeName, dataTypeInference: DataTypeInference.Nothing); [Test] - [TestCase("xid8", NpgsqlDbType.Xid8, TestName="XID8")] - public async Task UInt64(string pgTypeName, NpgsqlDbType npgsqlDbType) + [TestCase("xid8", TestName="XID8")] + public async Task UInt64(string dataTypeName) { await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "13.0", "The xid8 type was introduced in PostgreSQL 13"); - await AssertType(8ul, "8", pgTypeName, npgsqlDbType, isDefaultForWriting: false); + await AssertType(8ul, "8", dataTypeName, dataTypeInference: DataTypeInference.Nothing); } [Test] public async Task Int64() { - await AssertType(8L, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64); - - await AssertType((short)8, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType((byte)8, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8F, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8D, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8M, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); + await AssertType(8L, "8", "bigint", dbType: DbType.Int64); + + await AssertType((short)8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int16), valueTypeEqualsFieldType: 
false); + await AssertType(8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int32), valueTypeEqualsFieldType: false); + await AssertType((byte)8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int16), valueTypeEqualsFieldType: false, skipArrayCheck: true); // byte[] maps to bytea + await AssertType((sbyte)8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8F, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Double), valueTypeEqualsFieldType: false); + await AssertType(8M, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Decimal), valueTypeEqualsFieldType: false); } [Test] @@ -84,7 +106,7 @@ public async Task Double(double value, string sqlLiteral) await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "12.0"); - await AssertType(value, sqlLiteral, "double precision", NpgsqlDbType.Double, DbType.Double); + await AssertType(value, sqlLiteral, "double precision", dbType: DbType.Double); } [Test] @@ -93,21 +115,19 @@ public async Task Double(double value, string sqlLiteral) [TestCase(float.PositiveInfinity, "Infinity", TestName = "Float_PositiveInfinity")] [TestCase(float.NegativeInfinity, "-Infinity", TestName = "Float_NegativeInfinity")] public Task Float(float value, string sqlLiteral) - => AssertType(value, sqlLiteral, "real", NpgsqlDbType.Real, DbType.Single); + => AssertType(value, sqlLiteral, "real", dbType: DbType.Single); [Test] [TestCase(short.MaxValue + 1, "smallint")] [TestCase(int.MaxValue + 1L, "integer")] [TestCase(long.MaxValue + 1D, "bigint")] - public Task 
Write_overflow(T value, string pgTypeName) - => AssertTypeUnsupportedWrite(value, pgTypeName); + public Task Write_overflow(T value, string dataTypeName) + => AssertTypeUnsupportedWrite(value, dataTypeName); [Test] [TestCase((short)0, short.MaxValue + 1D, "int")] [TestCase(0, int.MaxValue + 1D, "bigint")] [TestCase(0L, long.MaxValue + 1D, "decimal")] - public Task Read_overflow(T _, double value, string pgTypeName) - => AssertTypeUnsupportedRead(value.ToString(CultureInfo.InvariantCulture), pgTypeName); - - public NumericTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file + public Task Read_overflow(T _, double value, string dataTypeName) + => AssertTypeUnsupportedRead(value.ToString(CultureInfo.InvariantCulture), dataTypeName); +} diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 4d489ad108..df83c68358 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -1,131 +1,69 @@ -using System; +using System; using System.ComponentModel; using System.Data; using System.Globalization; using System.Threading.Tasks; +using Npgsql.Properties; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; - using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; -/// -/// https://www.postgresql.org/docs/current/static/rangetypes.html -/// -class RangeTests : MultiplexingTestBase +class RangeTests : TestBase { - [Test, NUnit.Framework.Description("Resolves a range type handler via the different pathways")] - public async Task Range_resolution() - { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Range_resolution), // Prevent backend type caching in TypeHandlerRegistry - Pooling = false - }; - - using var conn = await OpenConnectionAsync(csb); - - // Resolve type by NpgsqlDbType - using (var cmd = new 
NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.AddWithValue("p", NpgsqlDbType.Range | NpgsqlDbType.Integer, DBNull.Value); - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - } - } - - // Resolve type by ClrType (type inference) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = new NpgsqlRange(3, 5) }); - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - } - } - - // Resolve type by DataTypeName - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName="p", DataTypeName = "int4range", Value = DBNull.Value }); - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - } - } - - // Resolve type by OID (read) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT int4range(3, 5)", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - Assert.That(reader.GetFieldValue>(0), Is.EqualTo(new NpgsqlRange(3, true, 5, false))); - } - } - - [Test] - public async Task Range() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3, @p4", conn); - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Range | NpgsqlDbType.Integer) { Value = NpgsqlRange.Empty }; - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = new NpgsqlRange(1, 10) }; - var p3 = new NpgsqlParameter { ParameterName = "p3", Value = new NpgsqlRange(1, false, 10, false) }; - var p4 = new NpgsqlParameter { ParameterName = "p4", Value = new NpgsqlRange(0, false, true, 10, false, false) }; - 
Assert.That(p2.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Range | NpgsqlDbType.Integer)); - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - cmd.Parameters.Add(p3); - cmd.Parameters.Add(p4); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - Assert.That(reader[0].ToString(), Is.EqualTo("empty")); - Assert.That(reader[1].ToString(), Is.EqualTo("[1,11)")); - Assert.That(reader[2].ToString(), Is.EqualTo("[2,10)")); - Assert.That(reader[3].ToString(), Is.EqualTo("(,10)")); - } - - [Test] - [NonParallelizable] - public async Task Range_with_long_subtype() - { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1 - }; - await using var conn = await OpenConnectionAsync(csb); - - var typeName = await GetTempTypeName(conn); - await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug - conn.ReloadTypes(); - Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); - - var value = new NpgsqlRange( - new string('a', conn.Settings.WriteBufferSize + 10), - new string('z', conn.Settings.WriteBufferSize + 10) - ); - - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Range | NpgsqlDbType.Text) { Value = value }); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); - await reader.ReadAsync(); - Assert.That(reader[0], Is.EqualTo(value)); - } + static readonly TestCaseData[] RangeTestCases = + [ + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range") + .SetName("IntegerRange"), + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int8range") + .SetName("BigIntRange"), + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange") + .SetName("NumericRange"), + new TestCaseData(new NpgsqlRange( + new DateTime(2020, 1, 1, 12, 0, 0), true, + new DateTime(2020, 1, 3, 13, 0, 0), 
false), + """["2020-01-01 12:00:00","2020-01-03 13:00:00")""", "tsrange") + .SetName("TimestampRange"), + // Note that the below text representations are local (according to TimeZone, which is set to Europe/Berlin in this test class), + // because that's how PG does timestamptz *text* representation. + new TestCaseData(new NpgsqlRange( + new DateTime(2020, 1, 1, 12, 0, 0, DateTimeKind.Utc), true, + new DateTime(2020, 1, 3, 13, 0, 0, DateTimeKind.Utc), false), + """["2020-01-01 13:00:00+01","2020-01-03 14:00:00+01")""", "tstzrange") + .SetName("TimestampTzRange"), + + // Note that numrange is a non-discrete range, and therefore doesn't undergo normalization to inclusive/exclusive in PG + new TestCaseData(NpgsqlRange.Empty, "empty", "numrange") + .SetName("EmptyRange"), + new TestCaseData(new NpgsqlRange(1, true, 10, true), "[1,10]", "numrange") + .SetName("Inclusive"), + new TestCaseData(new NpgsqlRange(1, false, 10, false), "(1,10)", "numrange") + .SetName("Exclusive"), + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange") + .SetName("InclusiveExclusive"), + new TestCaseData(new NpgsqlRange(1, false, 10, true), "(1,10]", "numrange") + .SetName("ExclusiveInclusive"), + new TestCaseData(new NpgsqlRange(1, false, true, 10, false, false), "(,10)", "numrange") + .SetName("InfiniteLowerBound"), + new TestCaseData(new NpgsqlRange(1, true, false, 10, false, true), "[1,)", "numrange") + .SetName("InfiniteUpperBound") + ]; + + // See more test cases in DateTimeTests + [Test, TestCaseSource(nameof(RangeTestCases))] + public Task Range(T range, string sqlLiteral, string dataTypeName) + => AssertType(range, sqlLiteral, dataTypeName, + // NpgsqlRange[] is mapped to multirange by default, not array, so the built-in AssertType testing for arrays fails + // (see below) + skipArrayCheck: true); + + // This re-executes the same scenario as above, but with isDefaultForWriting: false and without skipArrayCheck: true. 
+ // This tests coverage of range arrays (as opposed to multiranges). + [Test, TestCaseSource(nameof(RangeTestCases))] + public Task Range_array(T range, string sqlLiteral, string dataTypeName) + => AssertType(range, sqlLiteral, dataTypeName, dataTypeInference: DataTypeInference.Mismatch); [Test] public void Equality_finite() @@ -134,23 +72,23 @@ public void Equality_finite() //different bounds var r2 = new NpgsqlRange(1, true, false, 2, false, false); - Assert.IsFalse(r1 == r2); + Assert.That(r1 == r2, Is.False); //lower bound is not inclusive var r3 = new NpgsqlRange(0, false, false, 1, false, false); - Assert.IsFalse(r1 == r3); + Assert.That(r1 == r3, Is.False); //upper bound is inclusive var r4 = new NpgsqlRange(0, true, false, 1, true, false); - Assert.IsFalse(r1 == r4); + Assert.That(r1 == r4, Is.False); var r5 = new NpgsqlRange(0, true, false, 1, false, false); - Assert.IsTrue(r1 == r5); + Assert.That(r1 == r5); //check some other combinations while we are here - Assert.IsFalse(r2 == r3); - Assert.IsFalse(r2 == r4); - Assert.IsFalse(r3 == r4); + Assert.That(r2 == r3, Is.False); + Assert.That(r2 == r4, Is.False); + Assert.That(r3 == r4, Is.False); } [Test] @@ -158,22 +96,22 @@ public void Equality_infinite() { var r1 = new NpgsqlRange(0, false, true, 1, false, false); - //different upper bound (lower bound shoulnd't matter since it is infinite) + //different upper bound (lower bound shouldn't matter since it is infinite) var r2 = new NpgsqlRange(1, false, true, 2, false, false); - Assert.IsFalse(r1 == r2); + Assert.That(r1 == r2, Is.False); //upper bound is inclusive var r3 = new NpgsqlRange(0, false, true, 1, true, false); - Assert.IsFalse(r1 == r3); + Assert.That(r1 == r3, Is.False); //value of lower bound shouldn't matter since it is infinite var r4 = new NpgsqlRange(10, false, true, 1, false, false); - Assert.IsTrue(r1 == r4); + Assert.That(r1 == r4); //check some other combinations while we are here - Assert.IsFalse(r2 == r3); - Assert.IsFalse(r2 == r4); 
- Assert.IsFalse(r3 == r4); + Assert.That(r2 == r3, Is.False); + Assert.That(r2 == r4, Is.False); + Assert.That(r3 == r4, Is.False); } [Test] @@ -183,12 +121,12 @@ public void GetHashCode_value_types() NpgsqlRange b = NpgsqlRange.Empty; NpgsqlRange c = NpgsqlRange.Parse("(,)"); - Assert.IsFalse(a.Equals(b)); - Assert.IsFalse(a.Equals(c)); - Assert.IsFalse(b.Equals(c)); - Assert.AreNotEqual(a.GetHashCode(), b.GetHashCode()); - Assert.AreNotEqual(a.GetHashCode(), c.GetHashCode()); - Assert.AreNotEqual(b.GetHashCode(), c.GetHashCode()); + Assert.That(a.Equals(b), Is.False); + Assert.That(a.Equals(c), Is.False); + Assert.That(b.Equals(c), Is.False); + Assert.That(b.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(b.GetHashCode())); } [Test] @@ -198,12 +136,12 @@ public void GetHashCode_reference_types() NpgsqlRange b = NpgsqlRange.Empty; NpgsqlRange c = NpgsqlRange.Parse("(,)"); - Assert.IsFalse(a.Equals(b)); - Assert.IsFalse(a.Equals(c)); - Assert.IsFalse(b.Equals(c)); - Assert.AreNotEqual(a.GetHashCode(), b.GetHashCode()); - Assert.AreNotEqual(a.GetHashCode(), c.GetHashCode()); - Assert.AreNotEqual(b.GetHashCode(), c.GetHashCode()); + Assert.That(a.Equals(b), Is.False); + Assert.That(a.Equals(c), Is.False); + Assert.That(b.Equals(c), Is.False); + Assert.That(b.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(b.GetHashCode())); } [Test] @@ -225,6 +163,59 @@ public async Task TimestampTz_range_with_DateTimeOffset() Assert.That(actual, Is.EqualTo(range)); } + [Test] + public async Task Unmapped_range_with_mapped_subtype() + { + await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes().ConnectionStringBuilder.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + + var typeName = await 
GetTempTypeName(conn); + await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); + conn.ReloadTypes(); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + + var value = new NpgsqlRange( + new string('a', conn.Settings.WriteBufferSize + 10).ToCharArray(), + new string('z', conn.Settings.WriteBufferSize + 10).ToCharArray() + ); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { DataTypeName = typeName, ParameterName = "p", Value = value }); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await reader.ReadAsync(); + + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(NpgsqlRange))); + var result = reader.GetFieldValue>(0); + Assert.That(result, Is.EqualTo(value).Using>((actual, expected) => + actual.LowerBound!.SequenceEqual(expected.LowerBound!) && actual.UpperBound!.SequenceEqual(expected.UpperBound!))); + } + + [Test] + public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() + { + await using var connection = await DataSource.OpenConnectionAsync(); + var rangeType = await GetTempTypeName(connection); + await connection.ExecuteNonQueryAsync($"CREATE TYPE {rangeType} AS RANGE(subtype=text)"); + await connection.ReloadTypesAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite(new NpgsqlRange("bar", "foo"), rangeType); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead("""["bar","foo"]""", rangeType); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await 
AssertTypeUnsupportedRead>("""["bar","foo"]""", rangeType); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4441")] public async Task Array_of_range() { @@ -248,20 +239,41 @@ await AssertType( new(3, lowerBoundIsInclusive: true, 4, upperBoundIsInclusive: false), new(5, lowerBoundIsInclusive: true, 6, upperBoundIsInclusive: false) }, - @"{""[3,4)"",""[5,6)""}", + """{"[3,4)","[5,6)"}""", "int4range[]", - NpgsqlDbType.IntegerRange | NpgsqlDbType.Array, - isDefaultForWriting: !supportsMultirange, - isNpgsqlDbTypeInferredFromClrType: false); + dataTypeInference: supportsMultirange ? DataTypeInference.Mismatch : DataTypeInference.Match); } - [OneTimeSetUp] - public async Task OneTimeSetUp() + [Test] + public async Task Ranges_not_supported_by_default_on_NpgsqlSlimSourceBuilder() { - using var conn = await OpenConnectionAsync(); - MinimumPgVersion(conn, "9.2.0"); + var errorMessage = string.Format( + NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = await AssertTypeUnsupportedRead>("[1,10)", "int4range", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedWrite(new NpgsqlRange(1, true, 10, false), "int4range", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableRanges() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRanges(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new NpgsqlRange(1, true, 10, false), "[1,10)", 
"int4range", skipArrayCheck: true); + } + + protected override NpgsqlConnection OpenConnection() + => throw new NotSupportedException(); + #region ParseTests [Theory] @@ -270,7 +282,7 @@ public void Roundtrip_DateTime_ranges_through_ToString_and_Parse(NpgsqlRange.Parse(wellKnownText); - Assert.AreEqual(input, result); + Assert.That(result, Is.EqualTo(input)); } [Theory] @@ -280,7 +292,7 @@ public void Roundtrip_DateTime_ranges_through_ToString_and_Parse(NpgsqlRange.Parse(value); - Assert.AreEqual(NpgsqlRange.Empty, result); + Assert.That(result, Is.EqualTo(NpgsqlRange.Empty)); } [Theory] @@ -292,7 +304,7 @@ public void Parse_empty(string value) public void Roundtrip_int_ranges_through_ToString_and_Parse(string input) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(input.Replace(" ", null), result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(input.Replace(" ", null))); } [Theory] @@ -312,7 +324,7 @@ public void Roundtrip_int_ranges_through_ToString_and_Parse(string input) public void Int_range_Parse_ToString_returns_normalized_representations(string input, string normalized) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(normalized, result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(normalized)); } [Theory] @@ -332,7 +344,7 @@ public void Int_range_Parse_ToString_returns_normalized_representations(string i public void Nullable_int_range_Parse_ToString_returns_normalized_representations(string input, string normalized) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(normalized, result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(normalized)); } [Theory] @@ -343,7 +355,7 @@ public void Nullable_int_range_Parse_ToString_returns_normalized_representations public void String_range_Parse_ToString_returns_normalized_representations(string input, string normalized) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(normalized, result.ToString()); + Assert.That(result.ToString(), 
Is.EqualTo(normalized)); } [Theory] @@ -351,7 +363,7 @@ public void String_range_Parse_ToString_returns_normalized_representations(strin public void Roundtrip_string_ranges_through_ToString_and_Parse2(string input) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(input, result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(input)); } [Theory] @@ -370,12 +382,12 @@ public void TypeConverter() var converter = TypeDescriptor.GetConverter(typeof(NpgsqlRange)); // Act - Assert.IsInstanceOf.RangeTypeConverter>(converter); - Assert.IsTrue(converter.CanConvertFrom(typeof(string))); + Assert.That(converter, Is.InstanceOf.RangeTypeConverter>()); + Assert.That(converter.CanConvertFrom(typeof(string))); var result = converter.ConvertFromString("empty"); // Assert - Assert.AreEqual(NpgsqlRange.Empty, result); + Assert.That(result, Is.EqualTo(NpgsqlRange.Empty)); } #endregion @@ -388,14 +400,10 @@ class SimpleType string? Value { get; } SimpleType(string? value) - { - Value = value; - } + => Value = value; public override string? ToString() - { - return Value; - } + => Value; class SimpleTypeConverter : TypeConverter { @@ -420,31 +428,40 @@ public override object ConvertFrom(ITypeDescriptorContext? context, CultureInfo? 
new object[][] { // (2018-05-17, 2018-05-18) - new object[] { new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, false) }, + [new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, false)], // [2018-05-17, 2018-05-18] - new object[] { new NpgsqlRange(May_17_2018, true, false, May_18_2018, true, false) }, + [new NpgsqlRange(May_17_2018, true, false, May_18_2018, true, false)], // [2018-05-17, 2018-05-18) - new object[] { new NpgsqlRange(May_17_2018, true, false, May_18_2018, false, false) }, + [new NpgsqlRange(May_17_2018, true, false, May_18_2018, false, false)], // (2018-05-17, 2018-05-18] - new object[] { new NpgsqlRange(May_17_2018, false, false, May_18_2018, true, false) }, + [new NpgsqlRange(May_17_2018, false, false, May_18_2018, true, false)], // (,) - new object[] { new NpgsqlRange(default, false, true, default, false, true) }, - new object[] { new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, true) }, + [new NpgsqlRange(default, false, true, default, false, true)], + [new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, true)], // (2018-05-17,) - new object[] { new NpgsqlRange(May_17_2018, false, false, default, false, true) }, - new object[] { new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, true) }, + [new NpgsqlRange(May_17_2018, false, false, default, false, true)], + [new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, true)], // (,2018-05-18) - new object[] { new NpgsqlRange(default, false, true, May_18_2018, false, false) }, - new object[] { new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, false) } + [new NpgsqlRange(default, false, true, May_18_2018, false, false)], + [new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, false)] }; #endregion - public RangeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} + protected override NpgsqlDataSource DataSource { get; } + + public RangeTests() + => DataSource = CreateDataSource(builder => + { + 
builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + }); + + [OneTimeTearDown] + public void TearDown() => DataSource.Dispose(); } diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs new file mode 100644 index 0000000000..2fd330badf --- /dev/null +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -0,0 +1,155 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using Npgsql.Properties; +using NUnit.Framework; +using NUnit.Framework.Constraints; + +namespace Npgsql.Tests.Types; + +public class RecordTests : TestBase +{ + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/724")] + [IssueLink("https://github.com/npgsql/npgsql/issues/1980")] + public async Task Read_Record_as_object_array() + { + var recordLiteral = "(1,'foo'::text)::record"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + + var record = (object[])reader[0]; + Assert.That(record[0], Is.EqualTo(1)); + Assert.That(record[1], Is.EqualTo("foo")); + + var array = (object[][])reader[1]; + Assert.That(array.Length, Is.EqualTo(2)); + Assert.That(array[0][0], Is.EqualTo(1)); + Assert.That(array[1][0], Is.EqualTo(1)); + } + + [Test] + public async Task Read_Record_as_ValueTuple() + { + await using var dataSource = CreateDataSource(b => b.EnableRecordsAsTuples()); + await using var conn = await dataSource.OpenConnectionAsync(); + + var recordLiteral = "(1,'foo'::text)::record"; + await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + + var record = reader.GetFieldValue<(int, string)>(0); + Assert.That(record.Item1, Is.EqualTo(1)); + Assert.That(record.Item2, Is.EqualTo("foo")); + + var array = 
reader.GetFieldValue<(int, string)[]>(1); + Assert.That(array.Length, Is.EqualTo(2)); + Assert.That(array[0].Item1, Is.EqualTo(1)); + Assert.That(array[0].Item2, Is.EqualTo("foo")); + Assert.That(array[1].Item1, Is.EqualTo(1)); + Assert.That(array[1].Item2, Is.EqualTo("foo")); + } + + [Test] + public async Task Read_Record_as_Tuple() + { + await using var dataSource = CreateDataSource(b => b.EnableRecordsAsTuples()); + await using var conn = await dataSource.OpenConnectionAsync(); + + var recordLiteral = "(1,'foo'::text)::record"; + await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + + var record = reader.GetFieldValue>(0); + Assert.That(record.Item1, Is.EqualTo(1)); + Assert.That(record.Item2, Is.EqualTo("foo")); + + var array = reader.GetFieldValue[]>(1); + Assert.That(array.Length, Is.EqualTo(2)); + Assert.That(array[0].Item1, Is.EqualTo(1)); + Assert.That(array[0].Item2, Is.EqualTo("foo")); + Assert.That(array[1].Item1, Is.EqualTo(1)); + Assert.That(array[1].Item2, Is.EqualTo("foo")); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1238")] + public async Task Record_with_non_int_field() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT ('one'::TEXT, 2)", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + var record = reader.GetFieldValue(0); + Assert.That(record[0], Is.EqualTo("one")); + Assert.That(record[1], Is.EqualTo(2)); + } + + [Test] + public async Task As_ValueTuple_supported_only_with_EnableRecordsAsTuples() + { + await using var connection = await DataSource.OpenConnectionAsync(); + await using var command = new NpgsqlCommand("SELECT (1, 'foo')::record", connection); + await using var reader = await command.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var errorMessage = string.Format( + 
NpgsqlStrings.RecordsNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples), + nameof(NpgsqlDataSourceBuilder), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); + + var exception = Assert.Throws(() => reader.GetFieldValue<(int, string)>(0))!; + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task Records_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + // RecordHandler doesn't support writing, so we only check for reading + cmd.CommandText = "SELECT ('one'::text, 2)"; + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.RecordsNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples), + nameof(NpgsqlSlimDataSourceBuilder), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); + + var exception = Assert.Throws(() => reader.GetValue(0))!; + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = Assert.Throws(() => reader.GetFieldValue(0))!; + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableRecords() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRecords(); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + // RecordHandler doesn't support writing, so we only check 
for reading + cmd.CommandText = "SELECT ('one'::text, 2)"; + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + Assert.That(() => reader.GetValue(0), Throws.Nothing); + Assert.That(() => reader.GetFieldValue(0), Throws.Nothing); + } +} diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index e787122e7d..22403aa3d4 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -1,5 +1,6 @@ -using System; +using System; using System.Data; +using System.IO; using System.Text; using System.Threading.Tasks; using NpgsqlTypes; @@ -14,39 +15,55 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-character.html /// -public class TextTests : MultiplexingTestBase +public class TextTests : TestBase { [Test] public Task Text_as_string() - => AssertType("foo", "foo", "text", NpgsqlDbType.Text, DbType.String); + => AssertType("foo", "foo", "text", dbType: DbType.String); [Test] public Task Text_as_array_of_chars() - => AssertType("foo".ToCharArray(), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefaultForReading: false); + => AssertType("foo".ToCharArray(), "foo", "text", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.String, valueTypeEqualsFieldType: false); [Test] public Task Text_as_ArraySegment_of_chars() - => AssertTypeWrite(new ArraySegment("foo".ToCharArray()), "foo", "text", NpgsqlDbType.Text, DbType.String, - isDefault: false); + => AssertTypeWrite(new ArraySegment("foo".ToCharArray()), "foo", "text", dbType: DbType.String); [Test] public Task Text_as_array_of_bytes() - => AssertType(Encoding.UTF8.GetBytes("foo"), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefault: false); + => AssertType("foo"u8.ToArray(), "foo", "text", dataTypeInference: DataTypeInference.Mismatch, + new(DbType.String, DbType.Binary), valueTypeEqualsFieldType: false); + + [Test] + public Task 
Text_as_ReadOnlyMemory_of_bytes() + => AssertTypeWrite(new ReadOnlyMemory("foo"u8.ToArray()), "foo", + "text", dataTypeInference: DataTypeInference.Mismatch, + new(DbType.String, DbType.Binary)); [Test] public Task Char_as_char() - => AssertType('f', "f", "character", NpgsqlDbType.Char, inferredDbType: DbType.String, isDefault: false); + => AssertType('f', "f", + "character", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.String, valueTypeEqualsFieldType: false, skipArrayCheck: true); // char[] maps to text [Test] - [NonParallelizable] public async Task Citext_as_string() { await using var conn = await OpenConnectionAsync(); await EnsureExtensionAsync(conn, "citext"); - await AssertType("foo", "foo", "citext", NpgsqlDbType.Citext, inferredDbType: DbType.String, isDefaultForWriting: false); + await AssertType("foo", "foo", + "citext", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.String); } + [Test] + public Task Text_as_MemoryStream() + => AssertTypeWrite(() => new MemoryStream("foo"u8.ToArray()), "foo", + "text", dataTypeInference: DataTypeInference.Mismatch, + new(DbType.String, DbType.Binary)); + [Test] public async Task Text_long() { @@ -55,7 +72,7 @@ public async Task Text_long() builder.Append('X', conn.Settings.WriteBufferSize); var value = builder.ToString(); - await AssertType(value, value, "text", NpgsqlDbType.Text, DbType.String); + await AssertType(value, value, "text", dbType: DbType.String); } [Test, Description("Tests that strings are truncated when the NpgsqlParameter's Size is set")] @@ -74,6 +91,7 @@ public async Task Truncate() Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2.Substring(0, 4))); // NpgsqlParameter.Size larger than the value size should mean the value size, as well as 0 and -1 + p.Value = data2; p.Size = data2.Length + 10; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2)); p.Size = 0; @@ -92,10 +110,10 @@ public async Task Null_character() } [Test, Description("Tests some 
types which are aliased to strings")] - [TestCase("character varying", NpgsqlDbType.Varchar)] - [TestCase("name", NpgsqlDbType.Name)] - public Task Aliased_postgres_types(string pgTypeName, NpgsqlDbType npgsqlDbType) - => AssertType("foo", "foo", pgTypeName, npgsqlDbType, inferredDbType: DbType.String, isDefaultForWriting: false); + [TestCase("character varying")] + [TestCase("name")] + public Task Aliased_postgres_types(string dataTypeName) + => AssertType("foo", "foo", dataTypeName, dataTypeInference: DataTypeInference.Mismatch, dbType: DbType.String); [Test] [TestCase(DbType.AnsiString)] @@ -127,17 +145,15 @@ public async Task Internal_char() var expected = new char[] { 'a', (char)(256 - 3), 'b', (char)66, (char)230 }; for (var i = 0; i < expected.Length; i++) { - Assert.AreEqual(expected[i], reader.GetChar(i)); + Assert.That(reader.GetChar(i), Is.EqualTo(expected[i])); } var arr = (char[])reader.GetValue(5); var arr2 = (char[])reader.GetValue(6); - Assert.AreEqual(testArr.Length, arr.Length); + Assert.That(arr.Length, Is.EqualTo(testArr.Length)); for (var i = 0; i < arr.Length; i++) { - Assert.AreEqual(testArr[i], arr[i]); - Assert.AreEqual(testArr2[i], arr2[i]); + Assert.That(arr[i], Is.EqualTo(testArr[i])); + Assert.That(arr2[i], Is.EqualTo(testArr2[i])); } } - - public TextTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 690250aa68..4110a0856f 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -1,8 +1,5 @@ using System; -using System.Diagnostics; -using System.Globalization; using System.Net; -using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; @@ -11,31 +8,31 @@ namespace Npgsql.Tests; /// /// Tests NpgsqlTypes.* independent of a database /// -[TestFixture] public class TypesTests { +#pragma warning disable CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete [Test] public void TsVector() { 
NpgsqlTsVector vec; vec = NpgsqlTsVector.Parse("a"); - Assert.AreEqual("'a'", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'a'")); vec = NpgsqlTsVector.Parse("a "); - Assert.AreEqual("'a'", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'a'")); vec = NpgsqlTsVector.Parse("a:1A"); - Assert.AreEqual("'a':1A", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'a':1A")); vec = NpgsqlTsVector.Parse(@"\abc\def:1a "); - Assert.AreEqual("'abcdef':1A", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'abcdef':1A")); vec = NpgsqlTsVector.Parse(@"abc:3A 'abc' abc:4B 'hello''yo' 'meh\'\\':5"); - Assert.AreEqual(@"'abc':3A,4B 'hello''yo' 'meh''\\':5", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo(@"'abc':3A,4B 'hello''yo' 'meh''\\':5")); vec = NpgsqlTsVector.Parse(" a:12345C a:24D a:25B b c d 1 2 a:25A,26B,27,28"); - Assert.AreEqual("'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'")); } [Test] @@ -50,27 +47,27 @@ public void TsQuery() var str = query.ToString(); query = NpgsqlTsQuery.Parse("a & b | c"); - Assert.AreEqual("'a' & 'b' | 'c'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a' & 'b' | 'c'")); query = NpgsqlTsQuery.Parse("'a''':*ab&d:d&!c"); - Assert.AreEqual("'a''':*AB & 'd':D & !'c'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a''':*AB & 'd':D & !'c'")); query = NpgsqlTsQuery.Parse("(a & !(c | d)) & (!!a&b) | c | d | e"); - Assert.AreEqual("( ( 'a' & !( 'c' | 'd' ) & !( !'a' ) & 'b' | 'c' ) | 'd' ) | 'e'", query.ToString()); - Assert.AreEqual(query.ToString(), NpgsqlTsQuery.Parse(query.ToString()).ToString()); + Assert.That(query.ToString(), Is.EqualTo("( ( 'a' & !( 'c' | 'd' ) & !( !'a' ) & 'b' | 'c' ) | 'd' ) | 'e'")); + Assert.That(NpgsqlTsQuery.Parse(query.ToString()).ToString(), Is.EqualTo(query.ToString())); query = NpgsqlTsQuery.Parse("(((a:*)))"); - 
Assert.AreEqual("'a':*", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a':*")); query = NpgsqlTsQuery.Parse(@"'a\\b''cde'"); - Assert.AreEqual(@"a\b'cde", ((NpgsqlTsQueryLexeme)query).Text); - Assert.AreEqual(@"'a\\b''cde'", query.ToString()); + Assert.That(((NpgsqlTsQueryLexeme)query).Text, Is.EqualTo(@"a\b'cde")); + Assert.That(query.ToString(), Is.EqualTo(@"'a\\b''cde'")); query = NpgsqlTsQuery.Parse(@"a <-> b"); - Assert.AreEqual("'a' <-> 'b'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a' <-> 'b'")); query = NpgsqlTsQuery.Parse("((a & b) <5> c) <-> !d <0> e"); - Assert.AreEqual("( ( 'a' & 'b' <5> 'c' ) <-> !'d' ) <0> 'e'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("( ( 'a' & 'b' <5> 'c' ) <-> !'d' ) <0> 'e'")); Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("a b c & &")); Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("&")); @@ -87,6 +84,14 @@ public void TsQuery() Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("a b")); Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("a <-1> b")); } +#pragma warning restore CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete + + [Test] + public void TsVector_empty() + { + Assert.That(NpgsqlTsVector.Empty, Is.Empty); + Assert.That(NpgsqlTsVector.Empty.ToString(), Is.Empty); + } [Test] public void TsQueryEquatibility() @@ -162,27 +167,51 @@ public void TsQueryEquatibility() void AreEqual(NpgsqlTsQuery left, NpgsqlTsQuery right) { - Assert.True(left == right); - Assert.False(left != right); - Assert.AreEqual(left, right); - Assert.AreEqual(left.GetHashCode(), right.GetHashCode()); + Assert.That(left == right); + Assert.That(left != right, Is.False); + Assert.That(right, Is.EqualTo(left)); + Assert.That(right.GetHashCode(), Is.EqualTo(left.GetHashCode())); } void AreNotEqual(NpgsqlTsQuery left, NpgsqlTsQuery right) { - Assert.False(left == right); - Assert.True(left != right); - 
Assert.AreNotEqual(left, right); - Assert.AreNotEqual(left.GetHashCode(), right.GetHashCode()); + Assert.That(left == right, Is.False); + Assert.That(left != right); + Assert.That(right, Is.Not.EqualTo(left)); + Assert.That(right.GetHashCode(), Is.Not.EqualTo(left.GetHashCode())); } } +#pragma warning disable CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete [Test] public void TsQueryOperatorPrecedence() { var query = NpgsqlTsQuery.Parse("!a <-> b & c | d & e"); var expectedGrouping = NpgsqlTsQuery.Parse("((!(a) <-> b) & c) | (d & e)"); - Assert.AreEqual(expectedGrouping.ToString(), query.ToString()); + Assert.That(query.ToString(), Is.EqualTo(expectedGrouping.ToString())); + } +#pragma warning restore CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete + + [Test] + public void NpgsqlPath_empty() + => Assert.That(new NpgsqlPath { new(1, 2) }, Is.EqualTo(new NpgsqlPath(new NpgsqlPoint(1, 2)))); + + [Test] + public void NpgsqlPolygon_empty() + => Assert.That(new NpgsqlPolygon { new(1, 2) }, Is.EqualTo(new NpgsqlPolygon(new NpgsqlPoint(1, 2)))); + + [Test] + public void NpgsqlPath_default() + { + NpgsqlPath defaultPath = default; + Assert.That(defaultPath.Equals([new(1, 2)]), Is.False); + } + + [Test] + public void NpgsqlPolygon_default() + { + NpgsqlPolygon defaultPolygon = default; + Assert.That(defaultPolygon.Equals([new(1, 2)]), Is.False); } [Test] @@ -194,17 +223,50 @@ public void Bug1011018() var o = p.Value; } -#pragma warning disable 618 [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/750")] public void NpgsqlInet() { var v = new NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 32); Assert.That(v.ToString(), Is.EqualTo("2001:1db8:85a3:1142:1000:8a2e:1370:7334/32")); + } -#pragma warning disable CS8625 - Assert.That(v != null); // #776 -#pragma warning disable CS8625 + [Test] + public void NpgsqlInet_parse_ipv4() + { + var ipv4 = new NpgsqlInet("192.168.1.1/8"); + Assert.That(ipv4.Address, 
Is.EqualTo(IPAddress.Parse("192.168.1.1"))); + Assert.That(ipv4.Netmask, Is.EqualTo(8)); + + ipv4 = new NpgsqlInet("192.168.1.1/32"); + Assert.That(ipv4.Address, Is.EqualTo(IPAddress.Parse("192.168.1.1"))); + Assert.That(ipv4.Netmask, Is.EqualTo(32)); + } + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/5638")] + public void NpgsqlInet_parse_ipv6() + { + var ipv6 = new NpgsqlInet("2001:0000:130F:0000:0000:09C0:876A:130B/32"); + Assert.That(ipv6.Address, Is.EqualTo(IPAddress.Parse("2001:0000:130F:0000:0000:09C0:876A:130B"))); + Assert.That(ipv6.Netmask, Is.EqualTo(32)); + + ipv6 = new NpgsqlInet("2001:0000:130F:0000:0000:09C0:876A:130B"); + Assert.That(ipv6.Address, Is.EqualTo(IPAddress.Parse("2001:0000:130F:0000:0000:09C0:876A:130B"))); + Assert.That(ipv6.Netmask, Is.EqualTo(128)); + } + + [Test] + public void NpgsqlInet_ToString_ipv4() + { + Assert.That(new NpgsqlInet("192.168.1.1/8").ToString(), Is.EqualTo("192.168.1.1/8")); + Assert.That(new NpgsqlInet("192.168.1.1/32").ToString(), Is.EqualTo("192.168.1.1")); + } + + [Test] + public void NpgsqlInet_ToString_ipv6() + { + Assert.That(new NpgsqlInet("2001:0:130f::9c0:876a:130b/32").ToString(), Is.EqualTo("2001:0:130f::9c0:876a:130b/32")); + Assert.That(new NpgsqlInet("2001:0:130f::9c0:876a:130b/128").ToString(), Is.EqualTo("2001:0:130f::9c0:876a:130b")); } -#pragma warning restore 618 -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index fe8dc2e7d5..3818b1f8fe 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -1,28 +1,49 @@ -using System.IO; +using System; +using System.IO; using Npgsql.Internal; -using Npgsql.Util; using NUnit.Framework; namespace Npgsql.Tests; -[NonParallelizable] // Parallel access to a single buffer +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] // Parallel access to a single buffer class WriteBufferTests { + [Test] + public void 
Buffered_full_buffer_no_flush() + { + WriteBuffer.WritePosition += WriteBuffer.WriteSpaceLeft - sizeof(int); + var writer = WriteBuffer.GetWriter(null!, FlushMode.NonBlocking); + Assert.That(writer.ShouldFlush(sizeof(int)), Is.False); + + Assert.DoesNotThrow(() => + { + Span intBytes = stackalloc byte[4]; + writer.WriteBytes(intBytes); + }); + } + + [Test] + public void GetWriter_Full_Buffer() + { + WriteBuffer.WritePosition += WriteBuffer.WriteSpaceLeft; + var writer = WriteBuffer.GetWriter(null!, FlushMode.Blocking); + Assert.That(writer.ShouldFlush(sizeof(byte)), Is.True); + writer.Flush(); + Assert.That(writer.ShouldFlush(sizeof(byte)), Is.False); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1275")] - public void Write_zero_characters() + public void Chunked_string_with_full_buffer() { // Fill up the buffer entirely WriteBuffer.WriteBytes(new byte[WriteBuffer.Size], 0, WriteBuffer.Size); Assert.That(WriteBuffer.WriteSpaceLeft, Is.Zero); - int charsUsed; - bool completed; - WriteBuffer.WriteStringChunked("hello", 0, 5, true, out charsUsed, out completed); - Assert.That(charsUsed, Is.Zero); - Assert.That(completed, Is.False); - WriteBuffer.WriteStringChunked("hello".ToCharArray(), 0, 5, true, out charsUsed, out completed); - Assert.That(charsUsed, Is.Zero); - Assert.That(completed, Is.False); + var data = new string('a', WriteBuffer.Size) + "hello"; + var byteLength = WriteBuffer.TextEncoding.GetByteCount(data); + WriteBuffer.WriteString(data, byteLength, false); + Assert.That(WriteBuffer.WritePosition, Is.EqualTo(5)); + Assert.That(WriteBuffer.Buffer.AsSpan(0, 5).ToArray(), Is.EqualTo(new byte[] { (byte)'h', (byte)'e', (byte)'l', (byte)'l', (byte)'o' })); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] @@ -31,26 +52,11 @@ public void Chunked_string_encoding_fits() WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1], 0, WriteBuffer.Size - 1); Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - var charsUsed = 1; - 
var completed = true; - // This unicode character is three bytes when encoded in UTF8 - Assert.That(() => WriteBuffer.WriteStringChunked("\uD55C", 0, 1, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); - } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] - public void Chunked_byte_array_encoding_fits() - { - WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1], 0, WriteBuffer.Size - 1); - Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - - var charsUsed = 1; - var completed = true; // This unicode character is three bytes when encoded in UTF8 - Assert.That(() => WriteBuffer.WriteStringChunked("\uD55C".ToCharArray(), 0, 1, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); + var data = "\uD55C" + new string('a', WriteBuffer.Size); + var byteLength = WriteBuffer.TextEncoding.GetByteCount(data); + WriteBuffer.WriteString(data, byteLength, false); + Assert.That(WriteBuffer.WritePosition, Is.EqualTo(3)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3733")] @@ -59,38 +65,19 @@ public void Chunked_string_encoding_fits_with_surrogates() WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1]); Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - var charsUsed = 1; - var completed = true; - var cyclone = "🌀"; - - Assert.That(() => WriteBuffer.WriteStringChunked(cyclone, 0, cyclone.Length, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); - } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3733")] - public void Chunked_char_array_encoding_fits_with_surrogates() - { - WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1]); - Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - - var charsUsed = 1; - var completed = true; - var cyclone = "🌀"; - - Assert.That(() => 
WriteBuffer.WriteStringChunked(cyclone.ToCharArray(), 0, cyclone.Length, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); + var cyclone = "🌀" + new string('a', WriteBuffer.Size); + var byteLength = WriteBuffer.TextEncoding.GetByteCount(cyclone); + WriteBuffer.WriteString(cyclone, byteLength, false); + Assert.That(WriteBuffer.WritePosition, Is.EqualTo(4)); } -#pragma warning disable CS8625 [SetUp] public void SetUp() { Underlying = new MemoryStream(); - WriteBuffer = new NpgsqlWriteBuffer(null, Underlying, null, NpgsqlReadBuffer.DefaultSize, PGUtil.UTF8Encoding); + WriteBuffer = new NpgsqlWriteBuffer(null, Underlying, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding); + WriteBuffer.MessageLengthValidation = false; } -#pragma warning restore CS8625 // ReSharper disable once InconsistentNaming NpgsqlWriteBuffer WriteBuffer = default!; diff --git a/test/Npgsql.Tests/WriteStateTests.cs b/test/Npgsql.Tests/WriteStateTests.cs new file mode 100644 index 0000000000..b1235e4c45 --- /dev/null +++ b/test/Npgsql.Tests/WriteStateTests.cs @@ -0,0 +1,506 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using Npgsql.Internal; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests; + +/// +/// Tests that pin the write-state propagation and disposal contracts between NpgsqlParameter, +/// PgTypeInfo providers, and the converters they produce. +/// +public class WriteStateTests : TestBase +{ + [Test] + public async Task Nullable_array_write_state_flows([Values] bool fixedSize) + { + // Verifies that provider-produced write state flows through IsDbNull and Write + // for nullable array elements, both fixed-size and variable-size. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.AddTypeInfoResolverFactory(new WriteStateTrackingResolverFactory(fixedSize, tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] { 1, 2, 3 }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = input, DataTypeName = "integer[]" }); + await cmd.ExecuteNonQueryAsync(); + + if (fixedSize) + Assert.That(tracker.IsDbNullWriteStateReceived, Is.True, "IsDbNullValue did not receive write state"); + Assert.That(tracker.WriteWriteStateReceived, Is.True, "Write did not receive write state"); + } + + [Test] + public async Task Object_array_write_state_flows_through_late_bound_element([Values] bool fixedSize) + { + // Verifies write state propagation through two layers with mixed element shapes: + // outer: ArrayTypeInfoProvider + // inner: LateBoundTypeInfoProvider (object -> int or DBNull) + // int path -> WriteStateTrackingProvider (per-element wrapped WriteState) + // null path -> PgSerializerOptions.UnspecifiedDBNullTypeInfo (different concrete info entirely) + // The tracking int converter's IsDbNullValue and WriteCore must see the provider-produced + // write state after passing through the array + ObjectConverter layers, while the DBNull + // slots must flow through without disturbing the non-null slots. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.AddTypeInfoResolverFactory(new WriteStateTrackingResolverFactory(fixedSize, tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new object[] { 1, DBNull.Value, 2, DBNull.Value, 3 }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = input, DataTypeName = "integer[]" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.IsDbNullWriteStateReceived, Is.True, + "IsDbNullValue did not receive write state after array + late-bound object layers"); + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after array + late-bound object layers"); + } + + [Test] + public async Task Range_write_state_flows() + { + // Verifies write state propagation through a range composition: + // RangeConverter -> tracking int subtype (GetSize populates writeState) + // The range converter must carry each bound's subtype state into BeginNestedWrite so the subtype's + // WriteCore observes the provider-produced sentinel. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new NpgsqlRange(1, 10); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4range" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after range -> subtype composition"); + } + + [Test] + public async Task Multirange_write_state_flows() + { + // Verifies write state propagation through a multirange composition: + // MultirangeConverter[], NpgsqlRange> -> RangeConverter -> tracking int subtype + // Three layers: multirange stores per-range state, range stores per-bound state, subtype populates bound state. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.EnableMultiranges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] { new NpgsqlRange(1, 10), new NpgsqlRange(20, 30) }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter[]> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4multirange" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after multirange -> range -> subtype composition"); + } + + [Test] + public async Task Range_array_write_state_flows() + { + // Verifies write state propagation through an array-over-range composition: + // ArrayConverter[]> -> RangeConverter -> tracking int subtype + // The per-element array slot carries the range's WriteState, which itself nests the subtype state. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] { new NpgsqlRange(1, 10), new NpgsqlRange(20, 30) }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter[]> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4range[]" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after array -> range -> subtype composition"); + } + + [Test] + public async Task Array_of_multirange_write_state_flows() + { + // Verifies write state propagation through an array-over-multirange composition (four layers): + // ArrayConverter[][]> -> MultirangeConverter -> RangeConverter -> tracking int subtype + // The deepest common composition shape — if any layer loses state, the subtype's WriteCore never sees it. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.EnableMultiranges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] + { + new[] { new NpgsqlRange(1, 10), new NpgsqlRange(20, 30) }, + new[] { new NpgsqlRange(40, 50) } + }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter[][]> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4multirange[]" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after array -> multirange -> range -> subtype composition"); + } + + [Test] + public async Task Composite_write_state_flows() + { + // Verifies write state propagation through a composite composition: + // CompositeConverter -> tracking int4 field converter + // The composite's per-field WriteState storage must carry the subtype's sentinel into BeginNestedWrite. + // Uses a real PG CREATE TYPE + MapComposite so the CompositeConverter is constructed by the production path. 
+ var tracker = new WriteStateTracker(); + await using var adminConnection = await OpenConnectionAsync(); + var type = await TestUtil.GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.AddTypeInfoResolverFactory(new CompositeFieldWriteStateTrackingResolverFactory(tracker)); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand("SELECT @p", connection); + cmd.Parameters.Add(new NpgsqlParameter + { + ParameterName = "p", + Value = new CompositeWithInt { X = 42 }, + DataTypeName = type + }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after composite -> field subtype composition"); + } + + [Test] + public async Task Execute_disposes_write_state() + { + // Verifies that write state produced during ResolveTypeInfo (and carried through Bind/Write) is disposed + // once the normal execution path finishes, via ResetBindingInfo in the Write finally block. 
+ var tracker = new DisposalTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.AddTypeInfoResolverFactory(new DisposableWriteStateResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = 42, DataTypeName = "integer" }); + + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.Disposed, Is.True, "provider-produced write state was not disposed after normal execution"); + } + + [Test] + public async Task SchemaOnly_disposes_resolution_write_state() + { + // Verifies that write state produced during ResolveTypeInfo is disposed when Bind is skipped + // (e.g. CommandBehavior.SchemaOnly), so provider-allocated state does not leak. + var tracker = new DisposalTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.AddTypeInfoResolverFactory(new DisposableWriteStateResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = 42, DataTypeName = "integer" }); + + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); + + Assert.That(tracker.Disposed, Is.True, "provider-produced write state was not disposed after SchemaOnly execution"); + } + + sealed class WriteStateTracker + { + public bool IsDbNullWriteStateReceived; + public bool WriteWriteStateReceived; + } + + sealed class WriteStateTrackingConverter(bool fixedSize, WriteStateTracker tracker, bool generatesWriteState = false) + : PgBufferedConverter(customDbNullPredicate: true) + { + public override bool CanConvert(DataFormat 
format, out BufferRequirements bufferRequirements) + { + bufferRequirements = fixedSize ? BufferRequirements.CreateFixedSize(sizeof(int)) : BufferRequirements.Create(Size.CreateUpperBound(sizeof(int))); + return format is DataFormat.Binary; + } + + protected override bool IsDbNullValue(int value, object? writeState) + { + if (writeState is not null) + tracker.IsDbNullWriteStateReceived = true; + return false; + } + + protected override int ReadCore(PgReader reader) => reader.ReadInt32(); + + protected override void WriteCore(PgWriter writer, int value) + { + if (writer.Current.WriteState is not null) + tracker.WriteWriteStateReceived = true; + writer.WriteInt32(value); + } + + public override Size GetSize(SizeContext context, int value, ref object? writeState) + { + // Range/Multirange call the subtype converter directly with a fresh null writeState, so for those tests the + // subtype must produce state from GetSize. For the array tests the provider has already populated non-null + // state and the ??= is a no-op, preserving existing behavior. + if (generatesWriteState) + writeState ??= "provider-state"; + return sizeof(int); + } + } + + sealed class WriteStateTrackingProvider(PgSerializerOptions options, bool fixedSize, WriteStateTracker tracker) : PgConcreteTypeInfoProvider + { + PgConcreteTypeInfo? _concreteTypeInfo; + + PgConcreteTypeInfo GetOrCreate() + => _concreteTypeInfo ??= new(options, new WriteStateTrackingConverter(fixedSize, tracker), options.GetCanonicalTypeId(DataTypeNames.Int4)); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) => GetOrCreate(); + + protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext context, int value, ref object? 
writeState) + { + writeState = "provider-state"; + return GetOrCreate(); + } + } + + sealed class WriteStateTrackingResolverFactory(bool fixedSize, WriteStateTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new Resolver(fixedSize, tracker); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + sealed class Resolver(bool fixedSize, WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new PgProviderTypeInfo(options, new WriteStateTrackingProvider(options, fixedSize, tracker), DataTypeNames.Int4); + + // object->int4 goes through LateBoundTypeInfoProvider which delegates back to the int resolver above, + // letting us exercise write-state propagation across the object (late-bound) element layer. + if (dataTypeName == DataTypeNames.Int4 && type == typeof(object)) + return new PgProviderTypeInfo(options, new LateBoundTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Int4)), DataTypeNames.Int4); + + return null; + } + } + + sealed class ArrayResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName != DataTypeNames.Int4.ToArrayName()) + return null; + + if (type == typeof(object[])) + { + var objectElementInfo = options.GetTypeInfo(typeof(object), DataTypeNames.Int4); + if (objectElementInfo is not PgProviderTypeInfo objectElementProviderTypeInfo) + return null; + + return new PgProviderTypeInfo(options, + new ArrayTypeInfoProvider(objectElementProviderTypeInfo, typeof(object[])), + dataTypeName); + } + + var elementInfo = options.GetTypeInfo(typeof(int), DataTypeNames.Int4); + if (elementInfo is not PgProviderTypeInfo providerTypeInfo) + return null; + + return new PgProviderTypeInfo(options, + new ArrayTypeInfoProvider(providerTypeInfo, typeof(int[])), + dataTypeName); + } + } + } + + sealed class DisposalTracker : IDisposable + { + public bool Disposed { get; private set; } + + public void Dispose() => Disposed = true; + } + + sealed class DisposableWriteStateConverter : PgBufferedConverter + { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override int ReadCore(PgReader reader) => reader.ReadInt32(); + protected override void WriteCore(PgWriter writer, int value) => writer.WriteInt32(value); + } + + sealed class DisposableWriteStateProvider(PgSerializerOptions options, DisposalTracker tracker) : PgConcreteTypeInfoProvider + { + PgConcreteTypeInfo? _concreteTypeInfo; + + PgConcreteTypeInfo GetOrCreate() + => _concreteTypeInfo ??= new(options, new DisposableWriteStateConverter(), options.GetCanonicalTypeId(DataTypeNames.Int4)); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) => GetOrCreate(); + + protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext context, int value, ref object? 
writeState) + { + writeState = tracker; + return GetOrCreate(); + } + } + + sealed class DisposableWriteStateResolverFactory(DisposalTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new Resolver(tracker); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + sealed class Resolver(DisposalTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new PgProviderTypeInfo(options, new DisposableWriteStateProvider(options, tracker), DataTypeNames.Int4); + + return null; + } + } + } + + sealed class RangeWriteStateTrackingResolverFactory(WriteStateTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new NoOpResolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + public override IPgTypeInfoResolver? CreateRangeResolver() => new RangeResolver(tracker); + public override IPgTypeInfoResolver? CreateRangeArrayResolver() => new RangeArrayResolver(tracker); + public override IPgTypeInfoResolver? CreateMultirangeResolver() => new MultirangeResolver(tracker); + public override IPgTypeInfoResolver? CreateMultirangeArrayResolver() => new MultirangeArrayResolver(tracker); + + sealed class NoOpResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => null; + } + + sealed class RangeResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Range && (type == typeof(NpgsqlRange) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + return new PgConcreteTypeInfo(options, range, options.GetCanonicalTypeId(DataTypeNames.Int4Range)); + } + return null; + } + } + + sealed class RangeArrayResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Range.ToArrayName() && (type == typeof(NpgsqlRange[]) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + var rangeInfo = new PgConcreteTypeInfo(options, range, options.GetCanonicalTypeId(DataTypeNames.Int4Range)); + var arrayConverter = ArrayConverter[]>.CreateArrayBased>(rangeInfo, typeof(NpgsqlRange[])); + return new PgConcreteTypeInfo(options, arrayConverter, options.GetCanonicalTypeId(DataTypeNames.Int4Range.ToArrayName())); + } + return null; + } + } + + sealed class MultirangeResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Multirange && (type == typeof(NpgsqlRange[]) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + var multirange = new MultirangeConverter[], NpgsqlRange>(range); + return new PgConcreteTypeInfo(options, multirange, options.GetCanonicalTypeId(DataTypeNames.Int4Multirange)); + } + return null; + } + } + + sealed class MultirangeArrayResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Multirange.ToArrayName() && (type == typeof(NpgsqlRange[][]) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + var multirange = new MultirangeConverter[], NpgsqlRange>(range); + var multirangeInfo = new PgConcreteTypeInfo(options, multirange, options.GetCanonicalTypeId(DataTypeNames.Int4Multirange)); + var arrayConverter = ArrayConverter[][]>.CreateArrayBased[]>(multirangeInfo, typeof(NpgsqlRange[][])); + return new PgConcreteTypeInfo(options, arrayConverter, options.GetCanonicalTypeId(DataTypeNames.Int4Multirange.ToArrayName())); + } + return null; + } + } + } + + class CompositeWithInt + { + public int X { get; set; } + } + + sealed class CompositeFieldWriteStateTrackingResolverFactory(WriteStateTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new Resolver(tracker); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + sealed class Resolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + { + var converter = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + return new PgConcreteTypeInfo(options, converter, options.GetCanonicalTypeId(DataTypeNames.Int4)); + } + return null; + } + } + } +} diff --git a/test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj b/test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj deleted file mode 100644 index de35192b4b..0000000000 --- a/test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj +++ /dev/null @@ -1,14 +0,0 @@ - - - exe - linux-x64 - true - link - - - - - - - - diff --git a/test/Npgsql.TrimmingTests/Program.cs b/test/Npgsql.TrimmingTests/Program.cs deleted file mode 100644 index 2d5a226288..0000000000 --- a/test/Npgsql.TrimmingTests/Program.cs +++ /dev/null @@ -1,16 +0,0 @@ -using System; -using Npgsql; - -var connectionString = Environment.GetEnvironmentVariable("NPGSQL_TEST_DB") - ?? "Server=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0"; - -await using var conn = new NpgsqlConnection(connectionString); -await conn.OpenAsync(); -await using var cmd = new NpgsqlCommand("SELECT 'Hello World'", conn); -await using var reader = await cmd.ExecuteReaderAsync(); -while (await reader.ReadAsync()) -{ - var value = reader.GetFieldValue(0); - if (value != "Hello World") - throw new Exception($"Got {value} instead of the expected 'Hello World'"); -}