21
21
#include " clang/AST/GlobalDecl.h"
22
22
#include " clang/Basic/Builtins.h"
23
23
#include " clang/CIR/Dialect/IR/CIRDialect.h"
24
+ #include " clang/CIR/Dialect/IR/CIROpsEnums.h"
24
25
#include " clang/CIR/Dialect/IR/CIRTypes.h"
25
26
#include " llvm/Support/Casting.h"
26
27
#include " llvm/Support/ErrorHandling.h"
@@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E,
128
129
if (PtrTy->getPointeeType ()->isVoidType ())
129
130
break ;
130
131
assert (!UnimplementedFeature::tbaa ());
132
+
131
133
LValueBaseInfo InnerBaseInfo;
132
134
Address Addr = CGF.buildPointerWithAlignment (
133
135
CE->getSubExpr (), &InnerBaseInfo, IsKnownNonNull);
@@ -211,13 +213,79 @@ static Address buildPointerWithAlignment(const Expr *E,
211
213
return Address (CGF.buildScalarExpr (E), Align);
212
214
}
213
215
216
+ // / Helper method to check if the underlying ABI is AAPCS
217
+ static bool isAAPCS (const TargetInfo &TargetInfo) {
218
+ return TargetInfo.getABI ().starts_with (" aapcs" );
219
+ }
220
+
221
+ Address CIRGenFunction::getAddrOfBitFieldStorage (LValue base,
222
+ const FieldDecl *field,
223
+ unsigned index,
224
+ unsigned size) {
225
+ if (index == 0 )
226
+ return base.getAddress ();
227
+
228
+ auto loc = getLoc (field->getLocation ());
229
+ auto fieldType = builder.getUIntNTy (size);
230
+
231
+ auto fieldPtr =
232
+ mlir::cir::PointerType::get (getBuilder ().getContext (), fieldType);
233
+ auto sea = getBuilder ().createGetMember (
234
+ loc, fieldPtr, base.getPointer (), field->getName (), index );
235
+
236
+ return Address (sea, CharUnits::One ());
237
+ }
238
+
239
+ static bool useVolatileForBitField (const CIRGenModule &cgm, LValue base,
240
+ const CIRGenBitFieldInfo &info,
241
+ const FieldDecl *field) {
242
+ return isAAPCS (cgm.getTarget ()) && cgm.getCodeGenOpts ().AAPCSBitfieldWidth &&
243
+ info.VolatileStorageSize != 0 &&
244
+ field->getType ()
245
+ .withCVRQualifiers (base.getVRQualifiers ())
246
+ .isVolatileQualified ();
247
+ }
248
+
249
/// Build an LValue describing the bit-field `field` of the record lvalue
/// `base`, using the CIR record layout to locate its storage unit.
LValue CIRGenFunction::buildLValueForBitField(LValue base,
                                              const FieldDecl *field) {

  LValueBaseInfo BaseInfo = base.getBaseInfo();
  const RecordDecl *rec = field->getParent();
  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
  auto &info = layout.getBitFieldInfo(field);
  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
  unsigned Idx = layout.getCIRFieldNo(field);

  // AAPCS volatile bit-fields and BPF/debug-info preserved-access-index
  // paths are not implemented yet.
  if (useVolatile ||
      (IsInPreservedAIRegion ||
       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
    llvm_unreachable("NYI");
  }

  // Storage unit size in bits (volatile container size on the AAPCS path).
  const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;
  Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS);

  // Get the access type.
  mlir::Type FieldIntTy = builder.getUIntNTy(SS);

  // Reinterpret the storage as an integer of the access width if it isn't
  // one already.
  auto loc = getLoc(field->getLocation());
  if (Addr.getElementType() != FieldIntTy)
    Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);

  // Member access inherits the CVR qualifiers of the base lvalue.
  QualType fieldType =
      field->getType().withCVRQualifiers(base.getVRQualifiers());

  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
}
282
+
214
283
LValue CIRGenFunction::buildLValueForField (LValue base,
215
284
const FieldDecl *field) {
216
285
LValueBaseInfo BaseInfo = base.getBaseInfo ();
217
286
218
- if (field->isBitField ()) {
219
- llvm_unreachable (" NYI" );
220
- }
287
+ if (field->isBitField ())
288
+ return buildLValueForBitField (base, field);
221
289
222
290
// Fields of may-alias structures are may-alias themselves.
223
291
// FIXME: this should get propagated down through anonymous structs and unions.
@@ -522,12 +590,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
522
590
// / method emits the address of the lvalue, then loads the result as an rvalue,
523
591
// / returning the rvalue.
524
592
/// Emit the address of the lvalue, then load and return its value as an
/// rvalue. Bit-field lvalues take the dedicated extraction path.
RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
  assert(!LV.getType()->isFunctionType());
  assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");

  if (LV.isBitField())
    return buildLoadOfBitfieldLValue(LV, Loc);

  if (!LV.isSimple())
    llvm_unreachable("NYI");
  return RValue::get(buildLoadOfScalar(LV, Loc));
}
603
+
604
/// Load the value of the bit-field lvalue \p LV: read its storage unit,
/// then shift/mask (sign-extending if the field is signed) to extract the
/// field, and return it widened/truncated to the field's source type.
RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  mlir::Type ResLTy = convertType(LV.getType());
  Address Ptr = LV.getBitFieldAddress();
  mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
  auto ValWidth = Val.getType().cast<IntType>().getWidth();

  // On AAPCS targets a volatile bit-field may use a distinct container-sized
  // storage unit; pick the matching offset/size pair.
  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;

  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);

    // Reinterpret as signed, shift the field's top bit up to the sign bit,
    // then arithmetic-shift back down to sign-extend.
    mlir::Type typ = builder.getSIntNTy(ValWidth);
    Val = builder.createIntCast(Val, typ);

    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = builder.createShiftLeft(Val, HighBits);
    if (Offset + HighBits)
      Val = builder.createShiftRight(Val, Offset + HighBits);
  } else {
    // Unsigned: shift the field down, then mask off any higher bits.
    if (Offset)
      Val = builder.createShiftRight(Val, Offset);

    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = builder.createAnd(Val,
                              llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
  }
  Val = builder.createIntCast(Val, ResLTy);
  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
  return RValue::get(Val);
}
532
643
533
644
void CIRGenFunction::buildStoreThroughLValue (RValue Src, LValue Dst) {
@@ -550,6 +661,83 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
550
661
buildStoreOfScalar (Src.getScalarVal (), Dst);
551
662
}
552
663
664
+ void CIRGenFunction::buildStoreThroughBitfieldLValue (RValue Src, LValue Dst,
665
+ mlir::Value &Result) {
666
+ const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo ();
667
+ mlir::Type ResLTy = getTypes ().convertTypeForMem (Dst.getType ());
668
+ Address Ptr = Dst.getBitFieldAddress ();
669
+
670
+ // Get the source value, truncated to the width of the bit-field.
671
+ mlir::Value SrcVal = Src.getScalarVal ();
672
+
673
+ // Cast the source to the storage type and shift it into place.
674
+ SrcVal = builder.createIntCast (SrcVal, Ptr .getElementType ());
675
+ auto SrcWidth = SrcVal.getType ().cast <IntType>().getWidth ();
676
+ mlir::Value MaskedVal = SrcVal;
677
+
678
+ const bool UseVolatile =
679
+ CGM.getCodeGenOpts ().AAPCSBitfieldWidth && Dst.isVolatileQualified () &&
680
+ Info.VolatileStorageSize != 0 && isAAPCS (CGM.getTarget ());
681
+ const unsigned StorageSize =
682
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize ;
683
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset ;
684
+ // See if there are other bits in the bitfield's storage we'll need to load
685
+ // and mask together with source before storing.
686
+ if (StorageSize != Info.Size ) {
687
+ assert (StorageSize > Info.Size && " Invalid bitfield size." );
688
+
689
+ mlir::Value Val = buildLoadOfScalar (Dst, Dst.getPointer ().getLoc ());
690
+
691
+ // Mask the source value as needed.
692
+ if (!hasBooleanRepresentation (Dst.getType ()))
693
+ SrcVal = builder.createAnd (
694
+ SrcVal, llvm::APInt::getLowBitsSet (SrcWidth, Info.Size ));
695
+
696
+ MaskedVal = SrcVal;
697
+ if (Offset)
698
+ SrcVal = builder.createShiftLeft (SrcVal, Offset);
699
+
700
+ // Mask out the original value.
701
+ Val = builder.createAnd (
702
+ Val, ~llvm::APInt::getBitsSet (SrcWidth, Offset, Offset + Info.Size ));
703
+
704
+ // Or together the unchanged values and the source value.
705
+ SrcVal = builder.createOr (Val, SrcVal);
706
+
707
+ } else {
708
+ // According to the AACPS:
709
+ // When a volatile bit-field is written, and its container does not overlap
710
+ // with any non-bit-field member, its container must be read exactly once
711
+ // and written exactly once using the access width appropriate to the type
712
+ // of the container. The two accesses are not atomic.
713
+ if (Dst.isVolatileQualified () && isAAPCS (CGM.getTarget ()) &&
714
+ CGM.getCodeGenOpts ().ForceAAPCSBitfieldLoad )
715
+ llvm_unreachable (" volatile bit-field is not implemented for the AACPS" );
716
+ }
717
+
718
+ // Write the new value back out.
719
+ // TODO: constant matrix type, volatile, no init, non temporal, TBAA
720
+ buildStoreOfScalar (SrcVal, Ptr , Dst.isVolatileQualified (), Dst.getType (),
721
+ Dst.getBaseInfo (), false , false );
722
+
723
+ // Return the new value of the bit-field.
724
+ mlir::Value ResultVal = MaskedVal;
725
+ ResultVal = builder.createIntCast (ResultVal, ResLTy);
726
+
727
+ // Sign extend the value if needed.
728
+ if (Info.IsSigned ) {
729
+ assert (Info.Size <= StorageSize);
730
+ unsigned HighBits = StorageSize - Info.Size ;
731
+
732
+ if (HighBits) {
733
+ ResultVal = builder.createShiftLeft (ResultVal, HighBits);
734
+ ResultVal = builder.createShiftRight (ResultVal, HighBits);
735
+ }
736
+ }
737
+
738
+ Result = buildFromMemory (ResultVal, Dst.getType ());
739
+ }
740
+
553
741
static LValue buildGlobalVarDeclLValue (CIRGenFunction &CGF, const Expr *E,
554
742
const VarDecl *VD) {
555
743
QualType T = E->getType ();
@@ -773,7 +961,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
773
961
LValue LV = buildLValue (E->getLHS ());
774
962
775
963
SourceLocRAIIObject Loc{*this , getLoc (E->getSourceRange ())};
776
- buildStoreThroughLValue (RV, LV);
964
+ if (LV.isBitField ()) {
965
+ mlir::Value result;
966
+ buildStoreThroughBitfieldLValue (RV, LV, result);
967
+ } else {
968
+ buildStoreThroughLValue (RV, LV);
969
+ }
970
+
777
971
assert (!getContext ().getLangOpts ().OpenMP &&
778
972
" last priv cond not implemented" );
779
973
return LV;
@@ -2207,6 +2401,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
2207
2401
2208
2402
/// Load a scalar from the lvalue, translating the clang SourceLocation into
/// an MLIR location before delegating to the address-based overload.
mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                              SourceLocation Loc) {
  const Address addr = lvalue.getAddress();
  return buildLoadOfScalar(addr, lvalue.isVolatile(), lvalue.getType(),
                           getLoc(Loc), lvalue.getBaseInfo(),
                           lvalue.isNontemporal());
}
2408
+
2409
+ mlir::Value CIRGenFunction::buildLoadOfScalar (LValue lvalue,
2410
+ mlir::Location Loc) {
2210
2411
return buildLoadOfScalar (lvalue.getAddress (), lvalue.isVolatile (),
2211
2412
lvalue.getType (), Loc, lvalue.getBaseInfo (),
2212
2413
lvalue.isNontemporal ());
@@ -2224,6 +2425,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2224
2425
QualType Ty, SourceLocation Loc,
2225
2426
LValueBaseInfo BaseInfo,
2226
2427
bool isNontemporal) {
2428
+ return buildLoadOfScalar (Addr, Volatile, Ty, getLoc (Loc), BaseInfo,
2429
+ isNontemporal);
2430
+ }
2431
+
2432
+ mlir::Value CIRGenFunction::buildLoadOfScalar (Address Addr, bool Volatile,
2433
+ QualType Ty, mlir::Location Loc,
2434
+ LValueBaseInfo BaseInfo,
2435
+ bool isNontemporal) {
2227
2436
if (!CGM.getCodeGenOpts ().PreserveVec3Type ) {
2228
2437
if (Ty->isVectorType ()) {
2229
2438
llvm_unreachable (" NYI" );
@@ -2237,15 +2446,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
2237
2446
}
2238
2447
2239
2448
mlir::cir::LoadOp Load = builder.create <mlir::cir::LoadOp>(
2240
- getLoc ( Loc) , Addr.getElementType (), Addr.getPointer ());
2449
+ Loc, Addr.getElementType (), Addr.getPointer ());
2241
2450
2242
2451
if (isNontemporal) {
2243
2452
llvm_unreachable (" NYI" );
2244
2453
}
2245
-
2246
- // TODO: TBAA
2247
-
2248
- // TODO: buildScalarRangeCheck
2454
+
2455
+ assert (!UnimplementedFeature::tbaa () && " NYI" );
2456
+ assert (!UnimplementedFeature::emitScalarRangeCheck () && " NYI" );
2249
2457
2250
2458
return buildFromMemory (Load, Ty);
2251
2459
}
0 commit comments