C++: decimal value to binary, then apply the operation, then back to decimal

I have an array sets[] with x numbers (of type long) and an array operations[] of char with x-1 operations. Each number in sets[], in its 64-bit binary form, represents a subset of the numbers 0 to 63: each bit is 1 or 0 depending on whether that number is in the subset (e.g. {1, 2, 4} would be 1 1 0 1, since 3 is missing).

ex: decimal 5 ---> 000...00101, which means this subset contains only the last two numbers, #63 and #61.

Now, using the characters I get in operations[], I have to apply them to the binary forms of these numbers, as if they were operations on subsets (I hope "subset" is the right word). The operations are:

U = union ---> 101 U 010 = 111

A = intersection ---> 101 A 001 = 001

\ = A - B ---> 1110 - 0011 = 1100

/ = B - A ---> same as above, but reversed

So basically I have to read the numbers, convert them to binary, treat them as sets, apply the operations accordingly, and then return the result of all these operations as a decimal number.
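Just to make the encoding concrete, here is a small sketch (illustration only; printSubset is not part of my actual program) that prints which elements a given value represents, using the convention above that the rightmost bit is element #63:

#include <cstdint>
#include <iostream>

// Illustration only: list the elements of the subset encoded by `value`,
// assuming the rightmost (least significant) bit is element #63 and the
// leftmost bit is element #0.
void printSubset(std::uint64_t value)
{
    for (int element = 0; element < 64; ++element)
        if ((value >> (63 - element)) & 1u)   // element k lives at bit (63 - k)
            std::cout << element << ' ';
    std::cout << '\n';
}

int main()
{
    printSubset(5);   // 5 = ...00101 -> prints: 61 63
    return 0;
}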

my code is:

#include <iostream>

using namespace std;


void makeBinaryVector(int vec[64], long xx)
{
    // put xx in binary form in array "vec[]"
    int k = 63;
    long x = xx;

    if(xx == 0)
        for(int i=0;i<64;i++)
            vec[i] = 0;

    while(x != 0)
    {
        vec[k] = x % 2;
        x = x / 2;
        k--;
    }
}

void OperationInA(int A[64], char op, int B[64])
{
    int i;
    if(op == 'U')           //reunion
        for(i=0;i<64;i++)
            if(B[i] == 1)
                A[i] = 1;

    if(op == 'A')           //intersection
        for(i=0;i<64;i++)
        {
            if((B[i] == 1) && (A[i] == 1))
                A[i] = 1;
            else
                A[i] = 0;
        }

    if(op == '\\')          //A-B
        for(i=0;i<64;i++)
        {
            if( (A[i] == 0 && B[i] == 0) || (A[i] == 0 && B[i] == 1) )
                A[i] = 0;
            else if((A[i] == 1) && (B[i] == 1))
                A[i] = 0;
            else if((A[i] == 1) && (B[i] == 0))
                A[i] = 1;
        }

    if(op == '/')           //B-A
        for(i=0;i<64;i++)
        {
            if(B[i] == 0)
                A[i] = 0;
            else if((B[i] == 1) && (A[i] == 0))
                A[i] = 1;
            else if((B[i] == 1) && (A[i] == 1))
                A[i] = 0;
        }
}


unsigned long setOperations(long sets[], char operations[], unsigned int x)
{
    unsigned int i = 1;     //not 0, since i'll be reading the 1st number separately
    unsigned int j = 0;
    unsigned int n = x;
    int t;
    long a = sets[0];
    int A[64];
    for(t=0;t<64;t++)
        A[t] = 0;

    makeBinaryVector(A, a);  //hold in A the first number, binary, and the results of operations
    long b;
    int B[64];
    for(t=0;t<64;t++)        //Hold the next number in B[], in binary form
        B[t] = 0;

    char op;

    while(i < x && j < (x-1) )
    {
        b = sets[i];
        makeBinaryVector(B, b);
        op = operations[j];
        OperationInA(A, op, B);
        i++; j++;
    }

    //make array A a decimal number
    unsigned int base = 1;
    long nr = 0;
    for(t=63; t>=0; t--)
    {
        nr = nr + A[t] * base;
        base = base * 2;
    }

    return nr;
}

long sets[100];
char operations[100];
long n,i;

int main()
{
    cin>>n;
    for(i=0;i<n;i++)
        cin>>sets[i];

    for(i=0;i<n-1;i++)
        cin>>operations[i];

    cout<<setOperations(sets,operations,n);

    return 0;
}

So, everything seems fine, except when I try to do this:

sets = {5, 2, 1}, operations = {'U', '\'}

5 U 2 is 7 (111), and 7 \ 1 is 6 (111 - 001 = 110 → 6), so the result should be 6. However, when I enter them like this, the result is 4 (??).

However, if I just enter {7, 1} and {\}, the result is 6, as it should be. But if I enter them as first mentioned, {5, 2, 1} and {U, \}, it outputs 4.

I can't understand what I'm doing wrong ...

Why not just keep the numbers as they are and use bitwise operators on them? The values are already stored in binary internally.

Union is "|" and intersection is "&".

For the four operations, something like this:

if (op == 'A')
    result = a & b;       // intersection
else if (op == 'U')
    result = a | b;       // union
else if (op == '\\')
    result = a & ~b;      // A - B: bits set in a but not in b
else if (op == '/')
    result = b & ~a;      // B - A
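For example, applied to the numbers from the question (a minimal sketch; the variable names are just for illustration):

#include <iostream>

int main()
{
    unsigned long long a = 5, b = 2, c = 1;

    unsigned long long result = a | b;   // 5 U 2 -> binary 111, i.e. 7
    result = result & ~c;                // 7 \ 1 -> binary 110, i.e. 6

    std::cout << result << '\n';         // prints 6
    return 0;
}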

To add to @Hugal31's answer.

Note that on many systems long is 32 bits, not 64. If you need a guaranteed 64-bit value, use long long, which is 64 bits wide; you can verify the sizes with sizeof. Typically int is 4 bytes (32 bits) and long long is 8 bytes (64 bits).
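If you want to check this on your own machine, here is a quick sketch (std::uint64_t from <cstdint>, shown alongside, is guaranteed to be exactly 64 bits wide):

#include <cstdint>
#include <iostream>

int main()
{
    // sizeof reports sizes in bytes; typical results: int = 4, long = 4 or 8, long long = 8
    std::cout << "int:       " << sizeof(int) << '\n';
    std::cout << "long:      " << sizeof(long) << '\n';
    std::cout << "long long: " << sizeof(long long) << '\n';
    std::cout << "uint64_t:  " << sizeof(std::uint64_t) << '\n';   // always 8
    return 0;
}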

Also, converting a number to its binary form can be done more simply with a bit mask, without division and remainder. For example:

void makebinary(int vec[32], int x)
{
    int bitmask = 1;
    for (int i = 31; i >= 0; i--)
    {
        vec[i] = (x & bitmask) ? 1 : 0;
        bitmask <<= 1;
    }
}

The conversion back to a number works the same way in reverse. The intersection, for example, could then be computed like this:

int vx[32];
int vy[32];
makebinary(vx, x);
makebinary(vy, y);

int result = 0;
int j = 1;
for (int i = 31; i >= 0; i--)
{
    int n = (vx[i] & vy[i]) ? 1 : 0;
    result += n * j;
    j <<= 1;
}

But, as already said, all of this is unnecessary: you could simply write int result = X & Y;
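To tie everything together, here is a minimal sketch of the whole computation done directly with 64-bit values and bitwise operators (applyOperations and the choice of unsigned long long are mine, just for illustration, not part of the original code):

#include <iostream>

// Sketch: process the whole input with bitwise operators on 64-bit values.
unsigned long long applyOperations(const unsigned long long sets[],
                                   const char operations[],
                                   unsigned int count)
{
    unsigned long long result = sets[0];
    for (unsigned int i = 1; i < count; ++i)
    {
        char op = operations[i - 1];
        if (op == 'U')
            result |= sets[i];           // union
        else if (op == 'A')
            result &= sets[i];           // intersection
        else if (op == '\\')
            result &= ~sets[i];          // A - B
        else if (op == '/')
            result = sets[i] & ~result;  // B - A
    }
    return result;
}

int main()
{
    unsigned long long sets[] = {5, 2, 1};
    char operations[] = {'U', '\\'};
    std::cout << applyOperations(sets, operations, 3) << '\n';  // prints 6
    return 0;
}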


Source: https://habr.com/ru/post/1659283/

