@@ -844,32 +844,32 @@ static PyObject* connect_deltas(PyObject *self, PyObject *dstreams)
844844 tbw += cp_size ;
845845
846846 } else if (cmd ) {
847- // TODO: Compress nodes by parsing them in advance
848847 // Compression reduces fragmentation though, which is why we do it
849848 // in all cases.
849+ // It makes more sense the more consecutive add-chunks we have;
850+ // it's more likely in big deltas, for big binary files.
850851 const uchar * add_start = data - 1 ;
851852 const uchar * add_end = dend ;
852853 ull num_bytes = cmd ;
853854 data += cmd ;
854855 ull num_chunks = 1 ;
855856 while (data < dend ){
856- fprintf ( stderr , "looping\n" );
857+ //while (0){
857858 const char c = * data ;
858859 if (c & 0x80 ){
859860 add_end = data ;
860861 break ;
861862 } else {
862- num_chunks += 1 ;
863- data += c + 1 ; // advance by 1 to skip add cmd
863+ data += 1 + c ; // advance by 1 to skip add cmd
864864 num_bytes += c ;
865+ num_chunks += 1 ;
865866 }
866867 }
867868
868- fprintf (stderr , "add bytes = %i\n" , (int )num_bytes );
869869 #ifdef DEBUG
870870 assert (add_end - add_start > 0 );
871871 if (num_chunks > 1 ){
872- fprintf (stderr , "Compression worked, got %i bytes of %i chunks\n" , (int )num_bytes , (int )num_chunks );
872+ fprintf (stderr , "Compression: got %i bytes of %i chunks\n" , (int )num_bytes , (int )num_chunks );
873873 }
874874 #endif
875875
@@ -881,12 +881,11 @@ static PyObject* connect_deltas(PyObject *self, PyObject *dstreams)
881881 uchar * dcdata = PyMem_Malloc (num_bytes );
882882 while (add_start < add_end ){
883883 const char bytes = * add_start ++ ;
884- fprintf (stderr , "Copying %i bytes\n" , bytes );
885884 memcpy ((void * )dcdata , (void * )add_start , bytes );
886885 dcdata += bytes ;
887886 add_start += bytes ;
888887 }
889- DC_set_data_with_ownership (dc , dcdata );
888+ DC_set_data_with_ownership (dc , dcdata - num_bytes );
890889 } else {
891890 DC_set_data (dc , data - cmd , cmd , is_shared_data );
892891 }
0 commit comments