Skip to content

Commit 6015b22

Browse files
gitoleg authored and lanza committed
[CIR][CIRGen] Revisiting CIR generation for bitfields. Fixes #13 (#268)
This is an updated PR for [PR #233](#233) with some fixes explained in [PR #261](#261), which can now be safely closed. First of all, let me introduce what the bitfields look like in CIR. For the struct `S` defined as follows: ``` typedef struct { int a : 4; int b : 27; int c : 17; int d : 2; int e : 15; unsigned f; } S; ``` the CIR type is `!ty_22S22 = !cir.struct<struct "S" {!u32i, !u32i, !u16i, !u32i} #cir.record.decl.ast>` where all the bitfields are packed into the first three storages. Also, the following bugs were fixed: - type mismatch - index out of bounds - single bitfield of size < 8 These cases are covered with tests.
1 parent c8bbd86 commit 6015b22

10 files changed

+600
-84
lines changed

clang/lib/CIR/CodeGen/CIRGenBuilder.h

+64
Original file line numberDiff line numberDiff line change
@@ -460,6 +460,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder {
460460
return getConstInt(
461461
loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue());
462462
}
463+
mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ,
464+
const llvm::APInt &val) {
465+
return create<mlir::cir::ConstantOp>(loc, typ,
466+
getAttr<mlir::cir::IntAttr>(typ, val));
467+
}
463468
mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) {
464469
return create<mlir::cir::ConstantOp>(loc, getBoolTy(),
465470
getCIRBoolAttr(state));
@@ -677,6 +682,65 @@ class CIRGenBuilderTy : public mlir::OpBuilder {
677682
mlir::cir::UnaryOpKind::Not, value);
678683
}
679684

685+
mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind,
686+
const llvm::APInt &rhs) {
687+
return create<mlir::cir::BinOp>(
688+
lhs.getLoc(), lhs.getType(), kind, lhs,
689+
getConstAPInt(lhs.getLoc(), lhs.getType(), rhs));
690+
}
691+
692+
mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind,
693+
mlir::Value rhs) {
694+
return create<mlir::cir::BinOp>(lhs.getLoc(), lhs.getType(), kind, lhs,
695+
rhs);
696+
}
697+
698+
mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs,
699+
bool isShiftLeft) {
700+
return create<mlir::cir::ShiftOp>(
701+
lhs.getLoc(), lhs.getType(), lhs,
702+
getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft);
703+
}
704+
705+
mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) {
706+
auto width = lhs.getType().dyn_cast<mlir::cir::IntType>().getWidth();
707+
auto shift = llvm::APInt(width, bits);
708+
return createShift(lhs, shift, isShiftLeft);
709+
}
710+
711+
mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) {
712+
return createShift(lhs, bits, true);
713+
}
714+
715+
mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) {
716+
return createShift(lhs, bits, false);
717+
}
718+
719+
mlir::Value createLowBitsSet(mlir::Location loc, unsigned size,
720+
unsigned bits) {
721+
auto val = llvm::APInt::getLowBitsSet(size, bits);
722+
auto typ = mlir::cir::IntType::get(getContext(), size, false);
723+
return getConstAPInt(loc, typ, val);
724+
}
725+
726+
mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) {
727+
auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs);
728+
return createBinop(lhs, mlir::cir::BinOpKind::And, val);
729+
}
730+
731+
mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) {
732+
return createBinop(lhs, mlir::cir::BinOpKind::And, rhs);
733+
}
734+
735+
mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) {
736+
auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs);
737+
return createBinop(lhs, mlir::cir::BinOpKind::Or, val);
738+
}
739+
740+
mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) {
741+
return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs);
742+
}
743+
680744
//===--------------------------------------------------------------------===//
681745
// Cast/Conversion Operators
682746
//===--------------------------------------------------------------------===//

clang/lib/CIR/CodeGen/CIRGenExpr.cpp

+220-12
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include "clang/AST/GlobalDecl.h"
2222
#include "clang/Basic/Builtins.h"
2323
#include "clang/CIR/Dialect/IR/CIRDialect.h"
24+
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
2425
#include "clang/CIR/Dialect/IR/CIRTypes.h"
2526
#include "llvm/Support/Casting.h"
2627
#include "llvm/Support/ErrorHandling.h"
@@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E,
128129
if (PtrTy->getPointeeType()->isVoidType())
129130
break;
130131
assert(!UnimplementedFeature::tbaa());
132+
131133
LValueBaseInfo InnerBaseInfo;
132134
Address Addr = CGF.buildPointerWithAlignment(
133135
CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull);
@@ -211,13 +213,79 @@ static Address buildPointerWithAlignment(const Expr *E,
211213
return Address(CGF.buildScalarExpr(E), Align);
212214
}
213215

216+
/// Helper method to check if the underlying ABI is AAPCS
217+
static bool isAAPCS(const TargetInfo &TargetInfo) {
218+
return TargetInfo.getABI().starts_with("aapcs");
219+
}
220+
221+
Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
222+
const FieldDecl *field,
223+
unsigned index,
224+
unsigned size) {
225+
if (index == 0)
226+
return base.getAddress();
227+
228+
auto loc = getLoc(field->getLocation());
229+
auto fieldType = builder.getUIntNTy(size);
230+
231+
auto fieldPtr =
232+
mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
233+
auto sea = getBuilder().createGetMember(
234+
loc, fieldPtr, base.getPointer(), field->getName(), index);
235+
236+
return Address(sea, CharUnits::One());
237+
}
238+
239+
static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
240+
const CIRGenBitFieldInfo &info,
241+
const FieldDecl *field) {
242+
return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
243+
info.VolatileStorageSize != 0 &&
244+
field->getType()
245+
.withCVRQualifiers(base.getVRQualifiers())
246+
.isVolatileQualified();
247+
}
248+
249+
LValue CIRGenFunction::buildLValueForBitField(LValue base,
250+
const FieldDecl *field) {
251+
252+
LValueBaseInfo BaseInfo = base.getBaseInfo();
253+
const RecordDecl *rec = field->getParent();
254+
auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
255+
auto &info = layout.getBitFieldInfo(field);
256+
auto useVolatile = useVolatileForBitField(CGM, base, info, field);
257+
unsigned Idx = layout.getCIRFieldNo(field);
258+
259+
if (useVolatile ||
260+
(IsInPreservedAIRegion ||
261+
(getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
262+
llvm_unreachable("NYI");
263+
}
264+
265+
const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;
266+
Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS);
267+
268+
// Get the access type.
269+
mlir::Type FieldIntTy = builder.getUIntNTy(SS);
270+
271+
auto loc = getLoc(field->getLocation());
272+
if (Addr.getElementType() != FieldIntTy)
273+
Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);
274+
275+
QualType fieldType =
276+
field->getType().withCVRQualifiers(base.getVRQualifiers());
277+
278+
assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
279+
LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
280+
return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
281+
}
282+
214283
LValue CIRGenFunction::buildLValueForField(LValue base,
215284
const FieldDecl *field) {
216285
LValueBaseInfo BaseInfo = base.getBaseInfo();
217286

218-
if (field->isBitField()) {
219-
llvm_unreachable("NYI");
220-
}
287+
if (field->isBitField())
288+
return buildLValueForBitField(base, field);
221289

222290
// Fields of may-alias structures are may-alias themselves.
223291
// FIXME: this should get propagated down through anonymous structs and unions.
@@ -522,12 +590,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
522590
/// method emits the address of the lvalue, then loads the result as an rvalue,
523591
/// returning the rvalue.
524592
RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
525-
assert(LV.isSimple() && "not implemented");
526593
assert(!LV.getType()->isFunctionType());
527594
assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");
528595

529-
// Everything needs a load.
530-
return RValue::get(buildLoadOfScalar(LV, Loc));
596+
if (LV.isBitField())
597+
return buildLoadOfBitfieldLValue(LV, Loc);
598+
599+
if (LV.isSimple())
600+
return RValue::get(buildLoadOfScalar(LV, Loc));
601+
llvm_unreachable("NYI");
602+
}
603+
604+
RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
605+
SourceLocation Loc) {
606+
const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();
607+
608+
// Get the output type.
609+
mlir::Type ResLTy = convertType(LV.getType());
610+
Address Ptr = LV.getBitFieldAddress();
611+
mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
612+
auto ValWidth = Val.getType().cast<IntType>().getWidth();
613+
614+
bool UseVolatile = LV.isVolatileQualified() &&
615+
Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
616+
const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
617+
const unsigned StorageSize =
618+
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
619+
620+
if (Info.IsSigned) {
621+
assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
622+
623+
mlir::Type typ = builder.getSIntNTy(ValWidth);
624+
Val = builder.createIntCast(Val, typ);
625+
626+
unsigned HighBits = StorageSize - Offset - Info.Size;
627+
if (HighBits)
628+
Val = builder.createShiftLeft(Val, HighBits);
629+
if (Offset + HighBits)
630+
Val = builder.createShiftRight(Val, Offset + HighBits);
631+
} else {
632+
if (Offset)
633+
Val = builder.createShiftRight(Val, Offset);
634+
635+
if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
636+
Val = builder.createAnd(Val,
637+
llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
638+
}
639+
Val = builder.createIntCast(Val, ResLTy);
640+
assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
641+
return RValue::get(Val);
531642
}
532643

533644
void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
@@ -550,6 +661,83 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
550661
buildStoreOfScalar(Src.getScalarVal(), Dst);
551662
}
552663

664+
void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
665+
mlir::Value &Result) {
666+
const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo();
667+
mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType());
668+
Address Ptr = Dst.getBitFieldAddress();
669+
670+
// Get the source value, truncated to the width of the bit-field.
671+
mlir::Value SrcVal = Src.getScalarVal();
672+
673+
// Cast the source to the storage type and shift it into place.
674+
SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType());
675+
auto SrcWidth = SrcVal.getType().cast<IntType>().getWidth();
676+
mlir::Value MaskedVal = SrcVal;
677+
678+
const bool UseVolatile =
679+
CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
680+
Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
681+
const unsigned StorageSize =
682+
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
683+
const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
684+
// See if there are other bits in the bitfield's storage we'll need to load
685+
// and mask together with source before storing.
686+
if (StorageSize != Info.Size) {
687+
assert(StorageSize > Info.Size && "Invalid bitfield size.");
688+
689+
mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc());
690+
691+
// Mask the source value as needed.
692+
if (!hasBooleanRepresentation(Dst.getType()))
693+
SrcVal = builder.createAnd(
694+
SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size));
695+
696+
MaskedVal = SrcVal;
697+
if (Offset)
698+
SrcVal = builder.createShiftLeft(SrcVal, Offset);
699+
700+
// Mask out the original value.
701+
Val = builder.createAnd(
702+
Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size));
703+
704+
// Or together the unchanged values and the source value.
705+
SrcVal = builder.createOr(Val, SrcVal);
706+
707+
} else {
708+
// According to the AACPS:
709+
// When a volatile bit-field is written, and its container does not overlap
710+
// with any non-bit-field member, its container must be read exactly once
711+
// and written exactly once using the access width appropriate to the type
712+
// of the container. The two accesses are not atomic.
713+
if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
714+
CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
715+
llvm_unreachable("volatile bit-field is not implemented for the AACPS");
716+
}
717+
718+
// Write the new value back out.
719+
// TODO: constant matrix type, volatile, no init, non temporal, TBAA
720+
buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(),
721+
Dst.getBaseInfo(), false, false);
722+
723+
// Return the new value of the bit-field.
724+
mlir::Value ResultVal = MaskedVal;
725+
ResultVal = builder.createIntCast(ResultVal, ResLTy);
726+
727+
// Sign extend the value if needed.
728+
if (Info.IsSigned) {
729+
assert(Info.Size <= StorageSize);
730+
unsigned HighBits = StorageSize - Info.Size;
731+
732+
if (HighBits) {
733+
ResultVal = builder.createShiftLeft(ResultVal, HighBits);
734+
ResultVal = builder.createShiftRight(ResultVal, HighBits);
735+
}
736+
}
737+
738+
Result = buildFromMemory(ResultVal, Dst.getType());
739+
}
740+
553741
static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
554742
const VarDecl *VD) {
555743
QualType T = E->getType();
@@ -773,7 +961,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
773961
LValue LV = buildLValue(E->getLHS());
774962

775963
SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())};
776-
buildStoreThroughLValue(RV, LV);
964+
if (LV.isBitField()) {
965+
mlir::Value result;
966+
buildStoreThroughBitfieldLValue(RV, LV, result);
967+
} else {
968+
buildStoreThroughLValue(RV, LV);
969+
}
970+
777971
assert(!getContext().getLangOpts().OpenMP &&
778972
"last priv cond not implemented");
779973
return LV;
@@ -2207,6 +2401,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
22072401

22082402
mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
22092403
SourceLocation Loc) {
2404+
return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
2405+
lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(),
2406+
lvalue.isNontemporal());
2407+
}
2408+
2409+
mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
2410+
mlir::Location Loc) {
22102411
return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
22112412
lvalue.getType(), Loc, lvalue.getBaseInfo(),
22122413
lvalue.isNontemporal());
@@ -2224,6 +2425,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
22242425
QualType Ty, SourceLocation Loc,
22252426
LValueBaseInfo BaseInfo,
22262427
bool isNontemporal) {
2428+
return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo,
2429+
isNontemporal);
2430+
}
2431+
2432+
mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2433+
QualType Ty, mlir::Location Loc,
2434+
LValueBaseInfo BaseInfo,
2435+
bool isNontemporal) {
22272436
if (!CGM.getCodeGenOpts().PreserveVec3Type) {
22282437
if (Ty->isVectorType()) {
22292438
llvm_unreachable("NYI");
@@ -2237,15 +2446,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
22372446
}
22382447

22392448
mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
2240-
getLoc(Loc), Addr.getElementType(), Addr.getPointer());
2449+
Loc, Addr.getElementType(), Addr.getPointer());
22412450

22422451
if (isNontemporal) {
22432452
llvm_unreachable("NYI");
22442453
}
2245-
2246-
// TODO: TBAA
2247-
2248-
// TODO: buildScalarRangeCheck
2454+
2455+
assert(!UnimplementedFeature::tbaa() && "NYI");
2456+
assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
22492457

22502458
return buildFromMemory(Load, Ty);
22512459
}

0 commit comments

Comments
 (0)