@@ -240,13 +240,13 @@ void sqlcipher_deactivate() {
240240 optimized out by the compiler.
241241 Note: As suggested by Joachim Schipper (joachim.schipper@fox-it.com)
242242*/
243- void * sqlcipher_memset (void * v , unsigned char value , int len ) {
244- int i = 0 ;
243+ void * sqlcipher_memset (void * v , unsigned char value , u64 len ) {
244+ u64 i = 0 ;
245245 volatile unsigned char * a = v ;
246246
247247 if (v == NULL ) return v ;
248248
249- CODEC_TRACE_MEMORY ("sqlcipher_memset: setting %p[0-%d ]=%d)\n" , a , len , value );
249+ CODEC_TRACE_MEMORY ("sqlcipher_memset: setting %p[0-%llu ]=%d)\n" , a , len , value );
250250 for (i = 0 ; i < len ; i ++ ) {
251251 a [i ] = value ;
252252 }
@@ -257,9 +257,9 @@ void* sqlcipher_memset(void *v, unsigned char value, int len) {
257257/* constant time memory check tests every position of a memory segment
258258 matches a single value (i.e. the memory is all zeros)
259259   returns 0 if match, 1 if no match */
260- int sqlcipher_ismemset (const void * v , unsigned char value , int len ) {
260+ int sqlcipher_ismemset (const void * v , unsigned char value , u64 len ) {
261261 const unsigned char * a = v ;
262- int i = 0 , result = 0 ;
262+ u64 i = 0 , result = 0 ;
263263
264264 for (i = 0 ; i < len ; i ++ ) {
265265 result |= a [i ] ^ value ;
@@ -281,7 +281,7 @@ int sqlcipher_memcmp(const void *v0, const void *v1, int len) {
281281 return (result != 0 );
282282}
283283
284- void sqlcipher_mlock (void * ptr , int sz ) {
284+ void sqlcipher_mlock (void * ptr , u64 sz ) {
285285#ifndef OMIT_MEMLOCK
286286#if defined(__unix__ ) || defined(__APPLE__ )
287287 int rc ;
@@ -308,7 +308,7 @@ void sqlcipher_mlock(void *ptr, int sz) {
308308#endif
309309}
310310
311- void sqlcipher_munlock (void * ptr , int sz ) {
311+ void sqlcipher_munlock (void * ptr , u64 sz ) {
312312#ifndef OMIT_MEMLOCK
313313#if defined(__unix__ ) || defined(__APPLE__ )
314314 int rc ;
@@ -343,8 +343,8 @@ void sqlcipher_munlock(void *ptr, int sz) {
343343 * If sz is > 0, and not compiled with OMIT_MEMLOCK, system will attempt to unlock the
344344 * memory segment so it can be paged
345345 */
346- void sqlcipher_free (void * ptr , int sz ) {
347- CODEC_TRACE_MEMORY ("sqlcipher_free: calling sqlcipher_memset(%p,0,%d )\n" , ptr , sz );
346+ void sqlcipher_free (void * ptr , u64 sz ) {
347+ CODEC_TRACE_MEMORY ("sqlcipher_free: calling sqlcipher_memset(%p,0,%llu )\n" , ptr , sz );
348348 sqlcipher_memset (ptr , 0 , sz );
349349 sqlcipher_munlock (ptr , sz );
350350 sqlite3_free (ptr );
@@ -355,11 +355,11 @@ void sqlcipher_free(void *ptr, int sz) {
355355 * reference counted and leak detection works. Unless compiled with OMIT_MEMLOCK
356356 * attempts to lock the memory pages so sensitive information won't be swapped
357357 */
358- void * sqlcipher_malloc (int sz ) {
358+ void * sqlcipher_malloc (u64 sz ) {
359359 void * ptr ;
360- CODEC_TRACE_MEMORY ("sqlcipher_malloc: calling sqlite3Malloc(%d )\n" , sz );
360+ CODEC_TRACE_MEMORY ("sqlcipher_malloc: calling sqlite3Malloc(%llu )\n" , sz );
361361 ptr = sqlite3Malloc (sz );
362- CODEC_TRACE_MEMORY ("sqlcipher_malloc: calling sqlcipher_memset(%p,0,%d )\n" , ptr , sz );
362+ CODEC_TRACE_MEMORY ("sqlcipher_malloc: calling sqlcipher_memset(%p,0,%llu )\n" , ptr , sz );
363363 sqlcipher_memset (ptr , 0 , sz );
364364 sqlcipher_mlock (ptr , sz );
365365 return ptr ;
0 commit comments