### Diff for /gforth/prim between versions 1.164 and 1.170

Left column: version 1.164, 2005/01/26 22:06:03 — Right column: version 1.170, 2005/03/17 18:49:03
Line 805  n = n1*n2; Line 805  n = n1*n2;

/       ( n1 n2 -- n )          core    slash  /       ( n1 n2 -- n )          core    slash
n = n1/n2;  n = n1/n2;
if(FLOORED_DIV && (n1 < 0) != (n2 < 0) && (n1%n2 != 0)) n--;  if(FLOORED_DIV && ((n1^n2) < 0) && (n1%n2 != 0)) n--;
:  :
/mod nip ;   /mod nip ;

mod     ( n1 n2 -- n )          core  mod     ( n1 n2 -- n )          core
n = n1%n2;  n = n1%n2;
if(FLOORED_DIV && (n1 < 0) != (n2 < 0) && n!=0) n += n2;  if(FLOORED_DIV && ((n1^n2) < 0) && n!=0) n += n2;
:  :
/mod drop ;   /mod drop ;

/mod    ( n1 n2 -- n3 n4 )              core            slash_mod  /mod    ( n1 n2 -- n3 n4 )              core            slash_mod
n4 = n1/n2;  n4 = n1/n2;
n3 = n1%n2; /* !! is this correct? look into C standard! */  n3 = n1%n2; /* !! is this correct? look into C standard! */
if (FLOORED_DIV && (n1<0) != (n2<0) && n3!=0) {  if (FLOORED_DIV && ((n1^n2) < 0) && n3!=0) {
n4--;    n4--;
n3+=n2;    n3+=n2;
}  }
Line 840  n5=DLO(r); Line 840  n5=DLO(r);
/* assumes that the processor uses either floored or symmetric division */  /* assumes that the processor uses either floored or symmetric division */
n5 = d/n3;  n5 = d/n3;
n4 = d%n3;  n4 = d%n3;
if (FLOORED_DIV && (d<0) != (n3<0) && n4!=0) {  if (FLOORED_DIV && ((DHI(d)^n3)<0) && n4!=0) {
n5--;    n5--;
n4+=n3;    n4+=n3;
}  }
Line 857  DCell d = (DCell)n1 * (DCell)n2; Line 857  DCell d = (DCell)n1 * (DCell)n2;
#endif  #endif
#ifdef BUGGY_LL_DIV  #ifdef BUGGY_LL_DIV
DCell r = fmdiv(d,n3);  DCell r = fmdiv(d,n3);
n4=DHI(r);  n4=DLO(r);
#else  #else
/* assumes that the processor uses either floored or symmetric division */  /* assumes that the processor uses either floored or symmetric division */
n4 = d/n3;  n4 = d/n3;
if (FLOORED_DIV && (d<0) != (n3<0) && (d%n3)!=0) n4--;  if (FLOORED_DIV && ((DHI(d)^n3)<0) && (d%n3)!=0) n4--;
#endif  #endif
:  :
*/mod nip ;   */mod nip ;
Line 886  n2 = n1>>1; Line 886  n2 = n1>>1;
fm/mod  ( d1 n1 -- n2 n3 )              core            f_m_slash_mod  fm/mod  ( d1 n1 -- n2 n3 )              core            f_m_slash_mod
""Floored division: @i{d1} = @i{n3}*@i{n1}+@i{n2}, @i{n1}>@i{n2}>=0 or 0>=@i{n2}>@i{n1}.""  ""Floored division: @i{d1} = @i{n3}*@i{n1}+@i{n2}, @i{n1}>@i{n2}>=0 or 0>=@i{n2}>@i{n1}.""
#ifdef BUGGY_LL_DIV  #ifdef BUGGY_LL_DIV
#ifdef ASM_SM_SLASH_REM
ASM_SM_SLASH_REM(d1.lo, d1.hi, n1, n2, n3);
if (((DHI(d1)^n1)<0) && n2!=0) {
n3--;
n2+=n1;
}
#else /* !defined(ASM_SM_SLASH_REM) */
DCell r = fmdiv(d1,n1);  DCell r = fmdiv(d1,n1);
n2=DHI(r);  n2=DHI(r);
n3=DLO(r);  n3=DLO(r);
#endif /* !defined(ASM_SM_SLASH_REM) */
#else  #else
#ifdef ASM_SM_SLASH_REM4
ASM_SM_SLASH_REM4(d1, n1, n2, n3);
if (((DHI(d1)^n1)<0) && n2!=0) {
n3--;
n2+=n1;
}
#else /* !defined(ASM_SM_SLASH_REM4) */
/* assumes that the processor uses either floored or symmetric division */  /* assumes that the processor uses either floored or symmetric division */
n3 = d1/n1;  n3 = d1/n1;
n2 = d1%n1;  n2 = d1%n1;
/* note that this 1%-3>0 is optimized by the compiler */  /* note that this 1%-3>0 is optimized by the compiler */
if (1%-3>0 && (d1<0) != (n1<0) && n2!=0) {  if (1%-3>0 && ((DHI(d1)^n1)<0) && n2!=0) {
n3--;    n3--;
n2+=n1;    n2+=n1;
}  }
#endif /* !defined(ASM_SM_SLASH_REM4) */
#endif  #endif
:  :
dup >r dup 0< IF  negate >r dnegate r>  THEN   dup >r dup 0< IF  negate >r dnegate r>  THEN
Line 908  if (1%-3>0 && (d1<0) != (n1<0) && n2!=0) Line 924  if (1%-3>0 && (d1<0) != (n1<0) && n2!=0)
sm/rem  ( d1 n1 -- n2 n3 )              core            s_m_slash_rem  sm/rem  ( d1 n1 -- n2 n3 )              core            s_m_slash_rem
""Symmetric division: @i{d1} = @i{n3}*@i{n1}+@i{n2}, sign(@i{n2})=sign(@i{d1}) or 0.""  ""Symmetric division: @i{d1} = @i{n3}*@i{n1}+@i{n2}, sign(@i{n2})=sign(@i{d1}) or 0.""
#ifdef BUGGY_LL_DIV  #ifdef BUGGY_LL_DIV
#ifdef ASM_SM_SLASH_REM
ASM_SM_SLASH_REM(d1.lo, d1.hi, n1, n2, n3);
#else /* !defined(ASM_SM_SLASH_REM) */
DCell r = smdiv(d1,n1);  DCell r = smdiv(d1,n1);
n2=DHI(r);  n2=DHI(r);
n3=DLO(r);  n3=DLO(r);
#endif /* !defined(ASM_SM_SLASH_REM) */
#else  #else
#ifdef ASM_SM_SLASH_REM4
ASM_SM_SLASH_REM4(d1, n1, n2, n3);
#else /* !defined(ASM_SM_SLASH_REM4) */
/* assumes that the processor uses either floored or symmetric division */  /* assumes that the processor uses either floored or symmetric division */
n3 = d1/n1;  n3 = d1/n1;
n2 = d1%n1;  n2 = d1%n1;
/* note that this 1%-3<0 is optimized by the compiler */  /* note that this 1%-3<0 is optimized by the compiler */
if (1%-3<0 && (d1<0) != (n1<0) && n2!=0) {  if (1%-3<0 && ((DHI(d1)^n1)<0) && n2!=0) {
n3++;    n3++;
n2-=n1;    n2-=n1;
}  }
#endif /* !defined(ASM_SM_SLASH_REM4) */
#endif  #endif
:  :
over >r dup >r abs -rot   over >r dup >r abs -rot
Line 957  ud = (UDCell)u1 * (UDCell)u2; Line 981  ud = (UDCell)u1 * (UDCell)u2;
um/mod  ( ud u1 -- u2 u3 )              core    u_m_slash_mod  um/mod  ( ud u1 -- u2 u3 )              core    u_m_slash_mod
""ud=u3*u1+u2, u1>u2>=0""  ""ud=u3*u1+u2, u1>u2>=0""
#ifdef BUGGY_LL_DIV  #ifdef BUGGY_LL_DIV
#ifdef ASM_UM_SLASH_MOD
ASM_UM_SLASH_MOD(ud.lo, ud.hi, u1, u2, u3);
#else /* !defined(ASM_UM_SLASH_MOD) */
UDCell r = umdiv(ud,u1);  UDCell r = umdiv(ud,u1);
u2=DHI(r);  u2=DHI(r);
u3=DLO(r);  u3=DLO(r);
#endif /* !defined(ASM_UM_SLASH_MOD) */
#else  #else
#ifdef ASM_UM_SLASH_MOD4
ASM_UM_SLASH_MOD4(ud, u1, u2, u3);
#else /* !defined(ASM_UM_SLASH_MOD4) */
u3 = ud/u1;  u3 = ud/u1;
u2 = ud%u1;  u2 = ud%u1;
#endif /* !defined(ASM_UM_SLASH_MOD4) */
#endif  #endif
:  :
0 swap [ 8 cells 1 + ] literal 0     0 swap [ 8 cells 1 + ] literal 0
Line 2118  f2=FLAG(isdigit((unsigned)(sig[0]))!=0); Line 2150  f2=FLAG(isdigit((unsigned)(sig[0]))!=0);
siglen=strlen(sig);  siglen=strlen(sig);
if (siglen>u) /* happens in glibc-2.1.3 if 999.. is rounded up */  if (siglen>u) /* happens in glibc-2.1.3 if 999.. is rounded up */
siglen=u;    siglen=u;
if (!f2) /* workaround Cygwin trailing 0s for Inf and Nan */
for (; sig[siglen-1]=='0'; siglen--);
;