diff --git a/pipelined/src/fpu/fdivsqrt/fdivsqrtpreproc.sv b/pipelined/src/fpu/fdivsqrt/fdivsqrtpreproc.sv
index f20c7ceb0..f2355ef48 100644
--- a/pipelined/src/fpu/fdivsqrt/fdivsqrtpreproc.sv
+++ b/pipelined/src/fpu/fdivsqrt/fdivsqrtpreproc.sv
@@ -70,30 +70,31 @@ module fdivsqrtpreproc (
  // count the number of leading zeros
 
   if (`IDIV_ON_FPU) begin
-    // *** W64 muxes conditional on RV64
    // *** why ~Funct3E[0]? (Funct3E[0] distinguishes the unsigned DIVU/REMU encodings from signed DIV/REM)
-    assign AsE = ~Funct3E[0] & (W64E ? ForwardedSrcAE[31] : ForwardedSrcAE[`XLEN-1]);
-    assign BsE = ~Funct3E[0] & (W64E ? ForwardedSrcBE[31] : ForwardedSrcBE[`XLEN-1]);
-    assign A64 = W64E ? {{(`XLEN-32){AsE}}, ForwardedSrcAE[31:0]} : ForwardedSrcAE;
-    assign B64 = W64E ? {{(`XLEN-32){BsE}}, ForwardedSrcBE[31:0]} : ForwardedSrcBE;
-    assign A64Src = W64E ? {{(`XLEN-32){ForwardedSrcAE[31]}}, ForwardedSrcAE[31:0]} : ForwardedSrcAE;
+
+    if (`XLEN==64) begin // 64-bit, supports W64
+      assign AsE = ~Funct3E[0] & (W64E ? ForwardedSrcAE[31] : ForwardedSrcAE[`XLEN-1]);
+      assign BsE = ~Funct3E[0] & (W64E ? ForwardedSrcBE[31] : ForwardedSrcBE[`XLEN-1]);
+      assign A64 = W64E ? {{(`XLEN-32){AsE}}, ForwardedSrcAE[31:0]} : ForwardedSrcAE;  // *** rename this
+      assign B64 = W64E ? {{(`XLEN-32){BsE}}, ForwardedSrcBE[31:0]} : ForwardedSrcBE;
+//      assign A64Src = W64E ? {{(`XLEN-32){ForwardedSrcAE[31]}}, ForwardedSrcAE[31:0]} : ForwardedSrcAE;
+      assign AZeroE = W64E ? ~(|ForwardedSrcAE[31:0]) : ~(|ForwardedSrcAE);
+      assign BZeroE = W64E ? ~(|ForwardedSrcBE[31:0]) : ~(|ForwardedSrcBE);
+    end else begin // 32 bits only
+      assign AsE = ~Funct3E[0] & ForwardedSrcAE[`XLEN-1];
+      assign BsE = ~Funct3E[0] & ForwardedSrcBE[`XLEN-1];
+      assign A64 = ForwardedSrcAE;
+      assign B64 = ForwardedSrcBE;
+//      assign A64Src = ForwardedSrcAE;
+      assign AZeroE = ~(|ForwardedSrcAE);
+      assign BZeroE = ~(|ForwardedSrcBE);
+    end
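
A hypothetical trace of the `XLEN==64 branch for a signed divw operand (operand value invented for illustration; Funct3E[0] is clear for the signed DIV/REM/DIVW/REMW encodings):

      // Hypothetical example, not part of the patch:
      //   ForwardedSrcAE = 64'h00000000_FFFFFFF8 (W-type operand -8), W64E = 1, Funct3E[0] = 0
      //   AsE  = ~Funct3E[0] & ForwardedSrcAE[31]  = 1
      //   A64  = {{32{AsE}}, ForwardedSrcAE[31:0]} = 64'hFFFFFFFF_FFFFFFF8   (-8 sign-extended to 64 bits)
      //   PosA (a few lines below) = -A64          = 64'd8                   (magnitude sent to the divider)
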
 
     assign NegQuotE = (AsE ^ BsE) & MDUE;
     
     assign PosA = AsE ? -A64 : A64;
     assign PosB = BsE ? -B64 : B64;
-    assign AZeroE = W64E ? ~(|ForwardedSrcAE[31:0]) : ~(|ForwardedSrcAE);
-    assign BZeroE = W64E ? ~(|ForwardedSrcBE[31:0]) : ~(|ForwardedSrcBE);
 
-/*
-    assign IFNormLenX = MDUE ? {PosA, {(`DIVb-`XLEN){1'b0}}} : {Xm, {(`DIVb-`NF-1){1'b0}}};
-    assign IFNormLenD = MDUE ? {PosB, {(`DIVb-`XLEN){1'b0}}} : {Ym, {(`DIVb-`NF-1){1'b0}}};
-    lzc #(`DIVb) lzcX (IFNormLenX, ell);
-    lzc #(`DIVb) lzcY (IFNormLenD, mE);
-
-    assign XPreproc = IFNormLenX << (ell + {{`DIVBLEN{1'b0}}, 1'b1}); // had issue with (`DIVBLEN+1)'(~MDUE) so using this instead
-    assign DPreproc = IFNormLenD << (mE + {{`DIVBLEN{1'b0}}, 1'b1}); // replaced ~MDUE with 1 bc we always want that extra left shift
-*/
     assign ZeroDiff = mE - ell;
     assign ALTBE = ZeroDiff[`DIVBLEN]; // A less than B
     assign p = ALTBE ? '0 : ZeroDiff;
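
A note on the ALTBE test just above: ZeroDiff is a (`DIVBLEN+1)-bit two's-complement difference of the leading-zero counts, so its top bit acts as a sign bit. A hypothetical example with invented counts:

    // Hypothetical example, not part of the patch:
    //   ell = 10 (leading zeros of the dividend), mE = 3 (leading zeros of the divisor)
    //   ZeroDiff = 3 - 10 = -7, so ZeroDiff[`DIVBLEN] = 1 and ALTBE = 1:
    //   the dividend magnitude is smaller than the divisor magnitude and p is forced to 0.
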
@@ -101,17 +102,29 @@ module fdivsqrtpreproc (
   /* verilator lint_off WIDTH */
     assign pPlusr = (`DIVBLEN)'(`LOGR) + p;
     assign pPrTrunc = pPlusr % `RK;
-  //assign pPrTrunc = (`LOGRK == 0) ? 0 : pPlusr[`LOGRK-1:0];
     assign pPrCeil = (pPlusr >> `LOGRK) + {{`DIVBLEN{1'b0}}, |(pPrTrunc)};
     assign nE = (pPrCeil * (`DIVBLEN+1)'(`DIVCOPIES)) - {{(`DIVBLEN){1'b0}}, 1'b1};
     assign IntBits = (`DIVBLEN)'(`LOGR) + p - {{(`DIVBLEN){1'b0}}, 1'b1};
     assign RightShiftX = ((`DIVBLEN)'(`RK) - 1) - (IntBits % `RK);
-  //assign RightShiftX = (`LOGRK == 0) ? 0 : ((`DIVBLEN)'(`RK) - 1) - {{(`DIVBLEN - `RK){1'b0}}, IntBits[`LOGRK-1:0]};
   /* verilator lint_on WIDTH */
 
     assign NumZeroE = MDUE ? AZeroE : XZeroE;
 
     assign X = MDUE ? DivX >> RightShiftX : PreShiftX;
+
+    // pipeline registers
+    flopen #(1)        mdureg(clk, IFDivStartE, MDUE, MDUM);
+    flopen #(1)        w64reg(clk, IFDivStartE, W64E, W64M);
+    flopen #(`DIVBLEN+1) nreg(clk, IFDivStartE, nE, nM);
+    flopen #(`DIVBLEN+1) mreg(clk, IFDivStartE, mE, mM);
+    flopen #(1)       altbreg(clk, IFDivStartE, ALTBE, ALTBM);
+    flopen #(1)    negquotreg(clk, IFDivStartE, NegQuotE, NegQuotM);
+    flopen #(1)      azeroreg(clk, IFDivStartE, AZeroE, AZeroM);
+    flopen #(1)      bzeroreg(clk, IFDivStartE, BZeroE, BZeroM);
+    flopen #(1)      asignreg(clk, IFDivStartE, AsE, AsM);
+//    flopen #(`XLEN)   srcareg(clk, IFDivStartE, A64Src, ForwardedSrcAM);
+    flopen #(`XLEN)   srcareg(clk, IFDivStartE, A64, ForwardedSrcAM);
+
   end else begin
     assign NumZeroE = XZeroE;
     assign X = PreShiftX;
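
To make the ceiling arithmetic in the lint-protected block above concrete, here is a hypothetical worked example. It assumes a radix-4 divider with four overlapped stages, i.e. `LOGR = 2, `DIVCOPIES = 4, `RK = `LOGR*`DIVCOPIES = 8, and `LOGRK = 3; the numbers are illustrative only.

    // Hypothetical worked example (assuming `LOGR = 2, `DIVCOPIES = 4, `RK = 8, `LOGRK = 3):
    //   p           = 13
    //   pPlusr      = 2 + 13             = 15
    //   pPrTrunc    = 15 % 8             = 7
    //   pPrCeil     = (15 >> 3) + (|7)   = 1 + 1 = 2    // ceiling of 15/8
    //   nE          = 2*4 - 1            = 7
    //   IntBits     = 2 + 13 - 1         = 14
    //   RightShiftX = (8 - 1) - (14 % 8) = 1
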
@@ -125,7 +138,6 @@ module fdivsqrtpreproc (
  assign XPreproc = IFNormLenX << (ell + {{`DIVBLEN{1'b0}}, 1'b1}); // the cast (`DIVBLEN+1)'(~MDUE) caused problems, so a constant 1 is used instead
  assign DPreproc = IFNormLenD << (mE + {{`DIVBLEN{1'b0}}, 1'b1}); // ~MDUE replaced with a constant 1 because the extra left shift is always wanted
 
-
   assign SqrtX = (Xe[0]^ell[0]) ? {1'b0, ~NumZeroE, XPreproc[`DIVb-1:1]} : {~NumZeroE, XPreproc}; // Bottom bit of XPreproc is always zero because DIVb is larger than XLEN and NF
   assign DivX = {3'b000, ~NumZeroE, XPreproc};
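
A minimal sketch of the normalization shift above, using a deliberately narrow 8-bit width purely for readability (the real `DIVb is much wider):

  // Hypothetical example, not part of the patch:
  //   IFNormLenX = 8'b0001_1010, so the leading-zero count ell = 3
  //   XPreproc   = IFNormLenX << (ell + 1) = 8'b1010_0000   // the leading 1 is shifted out entirely
  //   DivX       = {3'b000, ~NumZeroE, XPreproc}            // ~NumZeroE puts the leading 1 back in front
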
 
@@ -145,18 +157,7 @@ module fdivsqrtpreproc (
  // r = 1 or 2 (log2 of the radix: 1 for radix 2, 2 for radix 4)
  // cycle count is approximately DIVRESLEN/(r*`DIVCOPIES)
 
-  flopen #(1)    negquotreg(clk, IFDivStartE, NegQuotE, NegQuotM);
-  flopen #(1)       altbreg(clk, IFDivStartE, ALTBE, ALTBM);
-  flopen #(1)      azeroreg(clk, IFDivStartE, AZeroE, AZeroM);
-  flopen #(1)      bzeroreg(clk, IFDivStartE, BZeroE, BZeroM);
-  flopen #(1)      asignreg(clk, IFDivStartE, AsE, AsM);
-  flopen #(1)        mdureg(clk, IFDivStartE, MDUE, MDUM);
-  flopen #(1)        w64reg(clk, IFDivStartE, W64E, W64M);
-  flopen #(`DIVBLEN+1) nreg(clk, IFDivStartE, nE, nM);
-  flopen #(`DIVBLEN+1) mreg(clk, IFDivStartE, mE, mM);
   flopen #(`NE+2)    expreg(clk, IFDivStartE, QeE, QeM);
-  flopen #(`XLEN)   srcareg(clk, IFDivStartE, A64Src, ForwardedSrcAM);
-
 
 endmodule
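
For reference, flopen is assumed to be the shared library's parameterized enabled flip-flop, which loads only while its enable (here IFDivStartE) is asserted. A minimal sketch of that assumed behavior, not part of this patch:

module flopen #(parameter WIDTH = 8) (
  input  logic             clk, en,
  input  logic [WIDTH-1:0] d,
  output logic [WIDTH-1:0] q);

  always_ff @(posedge clk)
    if (en) q <= d;   // capture d only while en is asserted
endmodule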