-
Notifications
You must be signed in to change notification settings - Fork 155
/
UnitOfWorkImpl.java
6228 lines (5662 loc) · 262 KB
/
UnitOfWorkImpl.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Copyright (c) 1998, 2021 Oracle and/or its affiliates. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0,
* or the Eclipse Distribution License v. 1.0 which is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
*/
// Contributors:
// Oracle - initial API and implementation from Oracle TopLink
// 05/28/2008-1.0M8 Andrei Ilitchev
// - 224964: Provide support for Proxy Authentication through JPA.
// The class was amended to allow it to instantiate ValueHolders after release method has been called
// (internalExecuteQuery method no longer throws exception if the uow is dead).
// Note that release method clears change sets but keeps the cache.
// 02/11/2009-1.1 Michael O'Brien
// - 259993: 1) Defer a full clear(true) call from entityManager.clear() to release()
// only if uow lifecycle is 1,2 or 4 (//Pending) and perform a clear of the cache only in this case.
// 2) During mergeClonesAfterCompletion() If the the acquire and release threads are different
// switch back to the stored acquire thread stored on the mergeManager.
// 17/04/2009-1.1 Michael O'Brien
// - 272022: For rollback scenarios - If the current thread and the active thread
// on the mutex do not match for read locks (not yet transitioned to deferred locks) - switch them
// 07/16/2009-2.0 Guy Pelletier
// - 277039: JPA 2.0 Cache Usage Settings
// 07/15/2011-2.2.1 Guy Pelletier
// - 349424: persists during an preCalculateUnitOfWorkChangeSet event are lost
// 14/05/2012-2.4 Guy Pelletier
// - 376603: Provide for table per tenant support for multitenant applications
// 08/11/2012-2.5 Guy Pelletier
// - 393867: Named queries do not work when using EM level Table Per Tenant Multitenancy.
// 09/03/2015 - Will Dazey
// - 456067 : Added support for defining query timeout units
// 01/29/2019-3.0 Sureshkumar Balakrishnan
// - 541873: ENTITYMANAGER.DETACH() TRIGGERS LAZY LOADING INTO THE PERSISTENCE CONTEXT
package org.eclipse.persistence.internal.sessions;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import org.eclipse.persistence.annotations.CacheKeyType;
import org.eclipse.persistence.config.ReferenceMode;
import org.eclipse.persistence.descriptors.ClassDescriptor;
import org.eclipse.persistence.descriptors.DescriptorEvent;
import org.eclipse.persistence.descriptors.DescriptorEventManager;
import org.eclipse.persistence.descriptors.changetracking.AttributeChangeTrackingPolicy;
import org.eclipse.persistence.descriptors.changetracking.ObjectChangePolicy;
import org.eclipse.persistence.exceptions.DatabaseException;
import org.eclipse.persistence.exceptions.DescriptorException;
import org.eclipse.persistence.exceptions.EclipseLinkException;
import org.eclipse.persistence.exceptions.OptimisticLockException;
import org.eclipse.persistence.exceptions.QueryException;
import org.eclipse.persistence.exceptions.ValidationException;
import org.eclipse.persistence.expressions.Expression;
import org.eclipse.persistence.indirection.ValueHolderInterface;
import org.eclipse.persistence.internal.databaseaccess.Accessor;
import org.eclipse.persistence.internal.databaseaccess.DatasourceAccessor;
import org.eclipse.persistence.internal.databaseaccess.Platform;
import org.eclipse.persistence.internal.descriptors.CascadeLockingPolicy;
import org.eclipse.persistence.internal.descriptors.DescriptorIterator;
import org.eclipse.persistence.internal.descriptors.DescriptorIterator.CascadeCondition;
import org.eclipse.persistence.internal.descriptors.ObjectBuilder;
import org.eclipse.persistence.internal.descriptors.PersistenceEntity;
import org.eclipse.persistence.internal.helper.ConcurrencyManager;
import org.eclipse.persistence.internal.helper.ConcurrencyUtil;
import org.eclipse.persistence.internal.helper.Helper;
import org.eclipse.persistence.internal.helper.IdentityHashSet;
import org.eclipse.persistence.internal.helper.IdentityWeakHashMap;
import org.eclipse.persistence.internal.identitymaps.CacheId;
import org.eclipse.persistence.internal.identitymaps.CacheKey;
import org.eclipse.persistence.internal.identitymaps.IdentityMapManager;
import org.eclipse.persistence.internal.indirection.DatabaseValueHolder;
import org.eclipse.persistence.internal.indirection.UnitOfWorkQueryValueHolder;
import org.eclipse.persistence.internal.indirection.UnitOfWorkTransformerValueHolder;
import org.eclipse.persistence.internal.localization.ExceptionLocalization;
import org.eclipse.persistence.internal.localization.LoggingLocalization;
import org.eclipse.persistence.internal.sequencing.Sequencing;
import org.eclipse.persistence.logging.AbstractSessionLog;
import org.eclipse.persistence.logging.SessionLog;
import org.eclipse.persistence.mappings.DatabaseMapping;
import org.eclipse.persistence.mappings.ForeignReferenceMapping;
import org.eclipse.persistence.mappings.foundation.AbstractTransformationMapping;
import org.eclipse.persistence.platform.server.ServerPlatform;
import org.eclipse.persistence.queries.Call;
import org.eclipse.persistence.queries.DatabaseQuery;
import org.eclipse.persistence.queries.DeleteObjectQuery;
import org.eclipse.persistence.queries.DoesExistQuery;
import org.eclipse.persistence.queries.InMemoryQueryIndirectionPolicy;
import org.eclipse.persistence.queries.ModifyAllQuery;
import org.eclipse.persistence.queries.ObjectBuildingQuery;
import org.eclipse.persistence.queries.ObjectLevelReadQuery;
import org.eclipse.persistence.queries.ReadObjectQuery;
import org.eclipse.persistence.queries.ReadQuery;
import org.eclipse.persistence.sessions.DatabaseRecord;
import org.eclipse.persistence.sessions.Session;
import org.eclipse.persistence.sessions.SessionProfiler;
import org.eclipse.persistence.sessions.coordination.MergeChangeSetCommand;
/**
* Implementation of org.eclipse.persistence.sessions.UnitOfWork
* The public interface should be used.
* @see org.eclipse.persistence.sessions.UnitOfWork
* <p>
* <b>Purpose</b>: To allow object level transactions.
* <p>
* <b>Description</b>: The unit of work is a session that implements all of the normal
* protocol of an EclipseLink session. It can be spawned from any other session including another unit of work.
* Objects can be brought into the unit of work through reading them or through registering them.
* The unit of work will operate on its own object space, that is the objects within the unit of work
* will be clones of the original objects. When the unit of work is committed, all changes to any objects
* registered within the unit of work will be committed to the database. A minimal commit/update will
* be performed and any foreign keys/circular reference/referential integrity will be resolved.
* If the commit to the database is successful the changed objects will be merged back into the unit of work
* parent session.
* <p>
* <b>Responsibilities</b>:
* <ul>
* <li> Allow parallel transactions against a session's objects.
* <li> Allow nested transactions.
* <li> Not require the application to write out the objects that it changes; automatically determine what has changed.
* <li> Perform a minimal commit/update of all changes that occurred.
* <li> Resolve foreign keys for newly created objects and maintain referential integrity.
* <li> Allow for the object transaction to use its own object space.
* </ul>
*
*/
public class UnitOfWorkImpl extends AbstractSession implements org.eclipse.persistence.sessions.UnitOfWork {
//These constants and variables are used in extended thread logging to compare UnitOfWork creation thread and thread which registering object in UnitOfWork
public final long CREATION_THREAD_ID = Thread.currentThread().getId();
public final String CREATION_THREAD_NAME = String.copyValueOf(Thread.currentThread().getName().toCharArray());
public final long CREATION_THREAD_HASHCODE = Thread.currentThread().hashCode();
private String creationThreadStackTrace;
/** Fix made for weak caches to avoid garbage collection of the originals. **/
/** As well as used as lookup in merge algorithm for aggregates and others **/
protected transient Map<Object, Object> cloneToOriginals;
protected transient AbstractSession parent;
/** Map of all the clones. The key contains the clone of the object. */
protected Map<Object, Object> cloneMapping;
protected Map<Object, Object> newObjectsCloneToOriginal;
protected Map<Object, Object> newObjectsOriginalToClone;
/**
* Stores a map from the clone to the original merged object, as a different instance is used as the original for merges.
*/
protected Map<Object, Object> newObjectsCloneToMergeOriginal;
protected Map<Object, Object> deletedObjects;
/** This member variable contains a copy of all of the clones for this particular UOW */
protected Map<Object, Object> allClones;
protected Map<Object, Object> objectsDeletedDuringCommit;
protected Map<Object, Object> removedObjects;
protected Map<Object, Object> unregisteredNewObjects;
protected Map<Object, Object> unregisteredNewObjectsInParent;
protected Map<Object, Object> unregisteredExistingObjects;
// bug # 3228185
// this collection is used to store the new objects from the parent.
// They will not be treated as new in the nested unit of work, so we must
//store them somewhere specifically to lookup later.
protected Map<Object, Object> newObjectsInParentOriginalToClone;
/** Cache references of private owned objects for the removal of private owned orphans */
protected Map<DatabaseMapping, Set> privateOwnedObjects;
/** used to store a list of the new objects in the parent */
//cr 2783
protected Map<Object, Object> newObjectsInParent;
protected Map<Object, Object> newAggregates;
/** This method is used to store the current changeSet for this UnitOfWork. */
protected UnitOfWorkChangeSet unitOfWorkChangeSet;
/** This is only used for EJB entity beans to manage beans accessed in a transaction context. */
protected UnitOfWorkImpl containerUnitOfWork;
protected Map<Object, Object> containerBeans;
/** use to track pessimistic locked objects */
protected Map<Object, Object> pessimisticLockedObjects;
/** Used to store the list of locks that this UnitOfWork has acquired for this merge */
protected transient MergeManager lastUsedMergeManager;
/**
* When in transaction batch read objects must use query local
* to the unit of work.
*/
protected Map<ReadQuery, ReadQuery> batchQueries;
/** Read-only class can be used for reference data to avoid cloning when not required. */
protected Set<Class> readOnlyClasses;
/** Flag indicating that the transaction for this UOW was already begun. */
protected boolean wasTransactionBegunPrematurely;
/** Allow for double merges of new objects by putting them into the cache. */
protected boolean shouldNewObjectsBeCached;
/** Flag indicating that deletes should be performed before other updates. */
protected boolean shouldPerformDeletesFirst;
/** Flag indicating how to deal with exceptions on conforming queries. **/
protected int shouldThrowConformExceptions;
/** The amount of validation can be configured. */
protected int validationLevel;
static public final int None = 0;
static public final int Partial = 1;
static public final int Full = 2;
/**
* With the new synchronized unit of work, need a lifecycle state variable to
* track birth, committed, pending_merge and death.
*/
protected int lifecycle;
public static final int Birth = 0;
public static final int CommitPending = 1;
// After a call to writeChanges() but before commit.
public static final int CommitTransactionPending = 2;
// After an unsuccessful call to writeChanges(). No recovery at all.
public static final int WriteChangesFailed = 3;
public static final int MergePending = 4;
public static final int Death = 5;
public static final int AfterExternalTransactionRolledBack = 6;
/** Used for Conforming Queries */
public static final int DO_NOT_THROW_CONFORM_EXCEPTIONS = 0;
public static final int THROW_ALL_CONFORM_EXCEPTIONS = 1;
//CR 3677 removed option to only throw valueHolderExceptions as this governed by
//the InMemoryQueryIndirectionPolicy
public static final String LOCK_QUERIES_PROPERTY = "LockQueriesProperties";
/** Used for merging dependent values without use of WL SessionAccessor */
protected static boolean SmartMerge = false;
/** Kept reference of read lock objects*/
protected Map<Object, Object> optimisticReadLockObjects;
/** Used for read lock to determine update the version field with the same value or increment value */
public static final String ReadLockOnly = "no update";
public static final String ReadLockUpdateVersion = "update version";
/** lazy initialization done in storeModifyAllQuery. For UpdateAllQuery, only clones of all UpdateAllQuery's (deferred and non-deferred) are stored here for validation only.*/
protected List<ModifyAllQuery> modifyAllQueries;
/**
* Contains deferred ModifyAllQuery's that have translation row for execution only.
* At commit their clones will be added to modifyAllQueries for validation afterwards.
* Array of the query (ModifyAllQuery) and translationRow (AbstractRecord).
*/
//Bug4607551
protected List<Object[]> deferredModifyAllQueries;
/**
* Used during the cloning process to track the recursive depth in. This will
* be used to determine at which point the process can begin to wait on locks
* without being concerned about creating deadlock situations.
*/
protected int cloneDepth;
/**
* PERF: Stores the JTA transaction to optimize activeUnitOfWork lookup.
*/
protected Object transaction;
/**
* True if UnitOfWork should be resumed on completion of transaction.
* Used when UnitOfWork is Synchronized with external transaction control
*/
protected boolean resumeOnTransactionCompletion;
/**
* PERF: Allows discover new objects to be skipped if app always calls persist.
*/
protected boolean shouldDiscoverNewObjects;
/**
* True if either DataModifyQuery or ModifyAllQuery was executed.
* Gets reset on commit, effects DoesExistQuery behavior and reading.
*/
protected boolean wasNonObjectLevelModifyQueryExecuted;
/**
* True if the value holder for the joined attribute should be triggered.
* Required by ejb30 fetch join.
*/
protected boolean shouldCascadeCloneToJoinedRelationship;
/** PERF: Cache isNestedUnitOfWork check. */
protected boolean isNestedUnitOfWork;
/** Determine if does-exist should be performed on persist. */
protected boolean shouldValidateExistence;
/** Allow updates and deletes to be ordered by id or changes to avoid possible deadlocks. */
protected CommitOrderType commitOrder;
/** This stored the reference mode for this UOW. If the reference mode is
* weak then this unit of work will retain only weak references to non new,
* non-deleted objects allowing for garbage collection. If ObjectChangeTracking
* is used then any objects with changes will not be garbage collected.
*/
protected ReferenceMode referenceMode;
// This is list is used during change tracking to keep hard references
// to changed objects that may otherwise have been garbage collected.
protected Set<Object> changeTrackedHardList;
/** Used to store objects already deleted from the db and unregistered */
protected Map<Object, Object> unregisteredDeletedObjectsCloneToBackupAndOriginal;
/** This attribute records when the preDelete stage of Commit has completed */
protected boolean preDeleteComplete;
/** Stores all of the private owned objects that have been removed and may need to cascade deletion */
protected Map<DatabaseMapping, List<Object>> deletedPrivateOwnedObjects;
/** temporarily holds a reference to a merge manager that is calling this UnitOfWork during merge **/
protected transient MergeManager mergeManagerForActiveMerge;
/** Set of objects that were deleted by database cascade delete constraints. */
protected Set<Object> cascadeDeleteObjects;
/**
* Used to store deleted objects that have reference to other deleted objects.
* This is need to delete cycles of objects in the correct order.
*/
protected Map<Object, Set<Object>> deletionDependencies;
/**
* INTERNAL:
*/
public UnitOfWorkImpl() {
    // No-arg constructor: performs no initialization. Presumably used by
    // subclasses and serialization mechanics -- TODO confirm against callers.
}
/**
* INTERNAL:
* Create and return a new unit of work with the session as its parent.
*/
public UnitOfWorkImpl(AbstractSession parent, ReferenceMode referenceMode) {
    super();
    // Copy logging/configuration state down from the parent session so this
    // unit of work behaves consistently with the session that spawned it.
    this.isLoggingOff = parent.isLoggingOff;
    this.referenceMode = referenceMode;
    this.shouldDiscoverNewObjects = true;
    this.name = parent.name;
    this.parent = parent;
    this.project = parent.project;
    this.profiler = parent.profiler;
    this.isInProfile = parent.isInProfile;
    this.sessionLog = parent.sessionLog;
    // Clone the event manager (if any) so events raised here target this session.
    if (parent.hasEventManager()) {
        this.eventManager = parent.getEventManager().clone(this);
    }
    this.exceptionHandler = parent.exceptionHandler;
    this.pessimisticLockTimeoutDefault = parent.pessimisticLockTimeoutDefault;
    this.pessimisticLockTimeoutUnitDefault = parent.pessimisticLockTimeoutUnitDefault;
    this.queryTimeoutDefault = parent.queryTimeoutDefault;
    this.queryTimeoutUnitDefault = parent.queryTimeoutUnitDefault;
    this.shouldOptimizeResultSetAccess = parent.shouldOptimizeResultSetAccess;
    this.serializer = parent.serializer;
    this.isConcurrent = parent.isConcurrent;
    // Initialize the readOnlyClasses variable with a copy, not a shared reference.
    this.setReadOnlyClasses(parent.copyReadOnlyClasses());
    this.validationLevel = Partial;
    // For 3.0.x, conforming queries will not throw exceptions unless explicitly asked to.
    this.shouldThrowConformExceptions = DO_NOT_THROW_CONFORM_EXCEPTIONS;
    // Initialize the lifecycle state variable (see Birth/CommitPending/... constants).
    this.lifecycle = Birth;
    // PERF: Cache the nested-UOW check to avoid the cost of checking in every register/clone.
    this.isNestedUnitOfWork = parent.isUnitOfWork();
    // eventManager is only non-null if the parent had one (cloned above).
    if (this.eventManager != null) {
        this.eventManager.postAcquireUnitOfWork();
    }
    this.descriptors = parent.getDescriptors();
    incrementProfile(SessionProfiler.UowCreated);
    // PERF: Cache the write-lock check to avoid the cost of checking in every register/clone.
    this.shouldCheckWriteLock = parent.getDatasourceLogin().shouldSynchronizedReadOnWrite() || parent.getDatasourceLogin().shouldSynchronizeWrites();
    // Order updates by id by default.
    this.commitOrder = CommitOrderType.ID;
    // Copy down the table per tenant information.
    this.tablePerTenantDescriptors = parent.tablePerTenantDescriptors;
    this.tablePerTenantQueries = parent.tablePerTenantQueries;
    // Capture a creation-thread dump only when extended thread logging + thread dump is enabled.
    creationThreadStackTrace = project.allowExtendedThreadLoggingThreadDump() ? ConcurrencyUtil.SINGLETON.enrichGenerateThreadDumpForCurrentThread() : null;
}
/**
* INTERNAL:
* Acquires a special historical session for reading objects as of a past time.
*/
@Override
public org.eclipse.persistence.sessions.Session acquireHistoricalSession(org.eclipse.persistence.history.AsOfClause clause) throws ValidationException {
    // Historical (as-of) sessions cannot be acquired from within a unit of work.
    throw ValidationException.cannotAcquireHistoricalSession();
}
/**
* PUBLIC:
* Return a nested unit of work for this unit of work.
* A nested unit of work can be used to isolate a subset of work on the unit of work,
* such as a dialog being open from an editor. The nested unit of work will only
* commit change to its objects to its parent unit of work, not the database.
* Only the parent unit of work will commit to the database.
*
* @see UnitOfWorkImpl
*/
@Override
public UnitOfWorkImpl acquireUnitOfWork() {
    // Create the nested unit of work through the standard session mechanism,
    // then seed it with the new objects already registered in this (parent)
    // unit of work so they are not treated as new again in the nested one.
    UnitOfWorkImpl nestedUnitOfWork = super.acquireUnitOfWork();
    nestedUnitOfWork.discoverAllUnregisteredNewObjectsInParent();
    return nestedUnitOfWork;
}
/**
* INTERNAL:
* Records a private owned object that has been de-referenced and will need to processed
* for related private owned objects.
*/
/**
 * INTERNAL:
 * Records a private owned object that has been de-referenced and will need to be
 * processed for related private owned objects.
 *
 * @param mapping the mapping through which the object was privately owned
 * @param object  the de-referenced private owned object
 */
public void addDeletedPrivateOwnedObjects(DatabaseMapping mapping, Object object)
{
    // Lazily initialize; identity semantics are used (mappings compared by reference).
    // Diamond operator added: the original used a raw IdentityHashMap.
    if (deletedPrivateOwnedObjects == null) {
        deletedPrivateOwnedObjects = new IdentityHashMap<>();
    }
    // computeIfAbsent replaces the original get / null-check / put sequence.
    deletedPrivateOwnedObjects.computeIfAbsent(mapping, key -> new ArrayList<>()).add(object);
}
/**
* INTERNAL:
* Register a new aggregate object with the unit of work.
*/
public void addNewAggregate(Object originalObject) {
    // The map is used as an identity set: key and value are the same object.
    getNewAggregates().put(originalObject, originalObject);
}
/**
* INTERNAL:
* Add object deleted during root commit of unit of work.
*/
public void addObjectDeletedDuringCommit(Object object, ClassDescriptor descriptor) {
    // The map is keyed on the object itself, which avoids having to compute the key later on.
    getObjectsDeletedDuringCommit().put(object, keyFromObject(object, descriptor));
    // bug 4730595: deleted objects must also be recorded in the change set.
    ((UnitOfWorkChangeSet)getUnitOfWorkChangeSet()).addDeletedObject(object, this);
}
/**
* PUBLIC:
* Adds the given Java class to the receiver's set of read-only classes.
* Cannot be called after objects have been registered in the unit of work.
*/
@Override
public void addReadOnlyClass(Class theClass) throws ValidationException {
    // Read-only classes may only be configured before any object is registered.
    if (!canChangeReadOnlySet()) {
        throw ValidationException.cannotModifyReadOnlyClassesSetAfterUsingUnitOfWork();
    }
    getReadOnlyClasses().add(theClass);
    ClassDescriptor rootDescriptor = getDescriptor(theClass);
    // Propagate the read-only marker down the inheritance hierarchy so every
    // subclass of a read-only class is also read-only.
    if (rootDescriptor.hasInheritance()) {
        for (ClassDescriptor subclassDescriptor : rootDescriptor.getInheritancePolicy().getChildDescriptors()) {
            addReadOnlyClass(subclassDescriptor.getJavaClass());
        }
    }
}
/**
* PUBLIC:
* Adds the classes in the given Vector to the existing set of read-only classes.
* Cannot be called after objects have been registered in the unit of work.
*/
/**
 * PUBLIC:
 * Adds the classes in the given collection to the existing set of read-only classes.
 * Cannot be called after objects have been registered in the unit of work.
 *
 * @param classes the classes to mark read-only (elements are expected to be {@code Class})
 */
@Override
public void addReadOnlyClasses(Collection classes) {
    // Enhanced for-loop replaces the original raw-Iterator loop; behavior is unchanged.
    for (Object theClass : classes) {
        addReadOnlyClass((Class) theClass);
    }
}
/**
* INTERNAL:
* Register that an object was removed in a nested unit of work.
*/
/**
 * INTERNAL:
 * Register that an object was removed in a nested unit of work.
 * (Fixes the misspelled parameter name {@code orignal}; parameter names are not
 * part of the binary interface, so callers are unaffected.)
 *
 * @param original the original object that was removed
 */
public void addRemovedObject(Object original) {
    getRemovedObjects().put(original, original);// Map used as an identity set.
}
/**
* ADVANCED:
* Assign sequence number to the object.
* This allows for an object's id to be assigned before commit.
* It can be used if the application requires to use the object id before the object exists on the database.
* Normally all ids are assigned during the commit automatically.
*/
@Override
public void assignSequenceNumber(Object object) throws DatabaseException {
    // Unwrap any proxy/wrapper before delegating to the internal two-argument form.
    ClassDescriptor targetDescriptor = getDescriptor(object);
    ObjectBuilder builder = targetDescriptor.getObjectBuilder();
    assignSequenceNumber(builder.unwrapObject(object, this), targetDescriptor);
}
/**
* INTERNAL:
* Assign sequence number to the object.
*/
public Object assignSequenceNumber(Object object, ClassDescriptor descriptor) throws DatabaseException {
    // Performed outside a transaction to ensure optimal concurrency and
    // deadlock avoidance in the sequence table. Guard clause: nothing to do
    // when the descriptor does not use sequencing, or the value is only
    // available after insert.
    if (!descriptor.usesSequenceNumbers() || descriptor.getSequence().shouldAcquireValueAfterInsert()) {
        return null;
    }
    startOperationProfile(SessionProfiler.AssignSequence);
    Object assignedValue = null;
    try {
        assignedValue = descriptor.getObjectBuilder().assignSequenceNumber(object, this);
    } catch (RuntimeException exception) {
        handleException(exception);
    } finally {
        endOperationProfile(SessionProfiler.AssignSequence);
    }
    return assignedValue;
}
/**
* ADVANCED:
* Assign sequence numbers to all new objects registered in this unit of work,
* or any new objects reference by any objects registered.
* This allows for an object's id to be assigned before commit.
* It can be used if the application requires to use the object id before the object exists on the database.
* Normally all ids are assigned during the commit automatically.
*/
@Override
public void assignSequenceNumbers() throws DatabaseException {
    // This should be done outside of a transaction to ensure optimal concurrency
    // and deadlock avoidance in the sequence table.
    // discoverAllUnregisteredNewObjects() must be called whether sequencing is
    // used or not, because collectAndPrepareObjectsForCommit() (which calls
    // assignSequenceNumbers()) depends on it. It would be logical to move the
    // discovery out of this method, but assignSequenceNumbers() is public and
    // may be called directly by the user, in which case discovery is needed
    // again (harmless even when sequencing is unused).
    discoverAllUnregisteredNewObjects();
    if (hasUnregisteredNewObjects()) {
        assignSequenceNumbers(getUnregisteredNewObjects());
    }
    if (hasNewObjects()) {
        assignSequenceNumbers(getNewObjectsCloneToOriginal());
    }
}
/**
* INTERNAL:
* Assign sequence numbers to all of the objects.
* This allows for an object's id to be assigned before commit.
* It can be used if the application requires to use the object id before the object exists on the database.
* Normally all ids are assigned during the commit automatically.
*/
/**
 * INTERNAL:
 * Assign sequence numbers to all of the given objects (keys of the map).
 * This allows an object's id to be assigned before commit, for applications
 * that need the id before the object exists on the database.
 *
 * @param objects map whose keys are the objects to sequence (values unused)
 * @throws DatabaseException if sequence value acquisition fails
 */
protected void assignSequenceNumbers(Map objects) throws DatabaseException {
    if (objects.isEmpty()) {
        return;
    }
    Sequencing sequencing = getSequencing();
    if (sequencing == null) {
        return;
    }
    int whenShouldAcquireValueForAll = sequencing.whenShouldAcquireValueForAll();
    // Nothing to do if every sequence value is only available after insert.
    if (whenShouldAcquireValueForAll == Sequencing.AFTER_INSERT) {
        return;
    }
    boolean shouldAcquireValueBeforeInsertForAll = whenShouldAcquireValueForAll == Sequencing.BEFORE_INSERT;
    startOperationProfile(SessionProfiler.AssignSequence);
    // try/finally added so the profile operation is always ended even if
    // sequence assignment throws, matching the single-object overload.
    try {
        for (Object object : objects.keySet()) {
            ClassDescriptor descriptor = getDescriptor(object);
            if (descriptor.usesSequenceNumbers()
                    && (shouldAcquireValueBeforeInsertForAll || !descriptor.getSequence().shouldAcquireValueAfterInsert())) {
                descriptor.getObjectBuilder().assignSequenceNumber(object, this);
            }
        }
    } finally {
        endOperationProfile(SessionProfiler.AssignSequence);
    }
}
/**
* PUBLIC:
* Tell the unit of work to begin a transaction now.
* By default the unit of work will begin a transaction at commit time.
* The default is the recommended approach, however sometimes it is
* necessary to start the transaction before commit time. When the
* unit of work commits, this transaction will be committed.
*
* @see #commit()
* @see #release()
*/
@Override
public void beginEarlyTransaction() throws DatabaseException {
    // Begin the transaction now instead of at commit time, and record that it
    // was begun prematurely so commit/release can handle it accordingly.
    beginTransaction();
    setWasTransactionBegunPrematurely(true);
}
/**
* INTERNAL:
* This is internal to the uow, transactions should not be used explicitly in a uow.
* The uow shares its parents transactions.
*/
@Override
public void beginTransaction() throws DatabaseException {
    // The unit of work shares its parent's transactions; delegate upward.
    this.parent.beginTransaction();
}
/**
* INTERNAL:
* Unregistered new objects have no original so we must create one for commit and resume and
* to put into the parent. We can NEVER let the same copy of an object exist in multiple units of work.
*/
public Object buildOriginal(Object workingClone) {
    ClassDescriptor cloneDescriptor = getDescriptor(workingClone);
    ObjectBuilder cloneBuilder = cloneDescriptor.getObjectBuilder();
    Object freshOriginal = cloneBuilder.instantiateClone(workingClone, this);
    // A missing original can mean any of the following:
    // - a RemoteUnitOfWork whose cloneToOriginals is transient,
    // - a clone read while in transaction, built directly from the database
    //   row with no intermediary original,
    // - an unregistered new object.
    if (checkIfAlreadyRegistered(workingClone, cloneDescriptor) != null) {
        getCloneToOriginals().put(workingClone, freshOriginal);
        return freshOriginal;
    }
    // Assume an unregistered new object; this is worrisome, as it may in fact
    // be an unregistered existing object not present in the parent cache.
    // The second instantiated clone is fine as a backup since the state is the same.
    Object backupClone = cloneBuilder.instantiateClone(workingClone, this);
    getCloneMapping().put(workingClone, backupClone);
    // Register the new instance / clone as the original. No identity-map
    // registration is needed here: the DatabaseQueryMechanism places the
    // object in the identity map on insert (bug 3431586).
    getNewObjectsCloneToOriginal().put(workingClone, freshOriginal);
    getNewObjectsOriginalToClone().put(freshOriginal, workingClone);
    return freshOriginal;
}
/**
 * INTERNAL:
 * <p> This calculates changes in two passes: first on registered objects,
 * second it discovers unregistered new objects on only those objects that changed,
 * and calculates their changes. This also assigns sequence numbers to new objects
 * when requested. Afterwards it records private-owned removals for deleted objects
 * and prunes orphaned privately owned objects from the change set.
 *
 * @param registeredObjects the working copies to compare for changes (typically the clone mapping)
 * @param changeSet the change set to accumulate per-object change sets into
 * @param assignSequences whether to assign sequence numbers to new objects
 * @param shouldCloneMap whether registeredObjects must be copied before iteration
 *        (needed because event callbacks may register additional objects)
 * @return the same changeSet instance that was passed in, now populated
 */
public UnitOfWorkChangeSet calculateChanges(Map registeredObjects, UnitOfWorkChangeSet changeSet, boolean assignSequences, boolean shouldCloneMap) {
// Fire the event first which may add to the registered objects. If we
// need to clone the registered objects, it should be done after this
// call.
if (this.eventManager != null) {
this.eventManager.preCalculateUnitOfWorkChangeSet();
}
Map allObjects = (shouldCloneMap) ? cloneMap(registeredObjects) : registeredObjects;
if (assignSequences && hasNewObjects()) {
// First assign sequence numbers to new objects.
assignSequenceNumbers(this.newObjectsCloneToOriginal);
}
// Second calculate changes for all registered objects.
// changedObjects/visitedNodes are identity-based: object identity, not equals().
Iterator objects = allObjects.keySet().iterator();
Map changedObjects = new IdentityHashMap();
Map visitedNodes = new IdentityHashMap();
while (objects.hasNext()) {
Object object = objects.next();
// Block of code removed because it will never be touched see bug # 2903565
ClassDescriptor descriptor = getDescriptor(object);
// Update any derived id's.
updateDerivedIds(object, descriptor);
// Block of code removed for code coverage, as it would never have been touched. bug # 2903600
boolean isNew = isCloneNewObject(object);
// Use the object change policy to determine if we should run a comparison for this object - TGW.
if (isNew || descriptor.getObjectChangePolicy().shouldCompareExistingObjectForChange(object, this, descriptor)) {
ObjectChangeSet changes = null;
if (isNew) {
changes = descriptor.getObjectChangePolicy().calculateChangesForNewObject(object, changeSet, this, descriptor, true);
} else {
changes = descriptor.getObjectChangePolicy().calculateChangesForExistingObject(object, changeSet, this, descriptor, true);
}
if (changes != null) {
changeSet.addObjectChangeSet(changes, this, true);
changedObjects.put(object, object);
// Propagate cascaded optimistic-lock notifications for real changes only.
if (changes.hasChanges() && !changes.hasForcedChangesFromCascadeLocking()) {
if (descriptor.hasCascadeLockingPolicies()) {
for (CascadeLockingPolicy policy : descriptor.getCascadeLockingPolicies()) {
policy.lockNotifyParent(object, changeSet, this);
}
} else if (descriptor.usesOptimisticLocking() && descriptor.getOptimisticLockingPolicy().isCascaded()) {
changes.setHasForcedChangesFromCascadeLocking(true);
}
}
} else {
// Mark as visited so do not need to traverse.
visitedNodes.put(object, object);
}
} else {
// Mark as visited so do not need to traverse.
visitedNodes.put(object, object);
}
}
// Record private-owned removals for deleted objects (root unit of work only).
// The deletedObjects map is cloned as recordPrivateOwnedRemovals may mutate it.
if (hasDeletedObjects() && !isNestedUnitOfWork()) {
for (Object deletedObject : ((IdentityHashMap)((IdentityHashMap)this.deletedObjects).clone()).keySet()) {
getDescriptor(deletedObject).getObjectBuilder().recordPrivateOwnedRemovals(deletedObject, this, true);
}
}
// Same for objects removed from private-owned mappings; the list is consumed here.
if ((this.deletedPrivateOwnedObjects != null) && !this.isNestedUnitOfWork) {
for (Map.Entry<DatabaseMapping, List<Object>> entry : this.deletedPrivateOwnedObjects.entrySet()) {
DatabaseMapping databasemapping = entry.getKey();
for (Object deletedObject : entry.getValue()) {
databasemapping.getReferenceDescriptor().getObjectBuilder().recordPrivateOwnedRemovals(deletedObject, this, false);
}
}
this.deletedPrivateOwnedObjects.clear();
}
// Give mappings a post-calculate callback for deleted objects, if any are configured.
if (this.project.hasMappingsPostCalculateChangesOnDeleted()) {
if (hasDeletedObjects()) {
for (Iterator deletedObjects = getDeletedObjects().keySet().iterator(); deletedObjects.hasNext();) {
Object deletedObject = deletedObjects.next();
ClassDescriptor descriptor = getDescriptor(deletedObject);
if(descriptor.hasMappingsPostCalculateChangesOnDeleted()) {
int size = descriptor.getMappingsPostCalculateChangesOnDeleted().size();
for(int i=0; i < size; i++) {
DatabaseMapping mapping = descriptor.getMappingsPostCalculateChangesOnDeleted().get(i);
mapping.postCalculateChangesOnDeleted(deletedObject, changeSet, this);
}
}
}
}
}
if (this.shouldDiscoverNewObjects && !changedObjects.isEmpty()) {
// Third discover any new objects from the new or changed objects.
Map newObjects = new IdentityHashMap();
// Bug 294259 - Do not replace the existingObjects list
// Iterate over the changed objects only.
discoverUnregisteredNewObjects(changedObjects, newObjects, getUnregisteredExistingObjects(), visitedNodes);
setUnregisteredNewObjects(newObjects);
if (assignSequences) {
assignSequenceNumbers(newObjects);
}
for (Iterator newObjectsEnum = newObjects.values().iterator(); newObjectsEnum.hasNext(); ) {
Object object = newObjectsEnum.next();
ClassDescriptor descriptor = getDescriptor(object);
ObjectChangeSet changes = descriptor.getObjectChangePolicy().calculateChangesForNewObject(object, changeSet, this, descriptor, true);
// Since it is new, it will always have a change set.
changeSet.addObjectChangeSet(changes, this, true);
}
}
// Remove any orphaned privately owned objects from the UnitOfWork and ChangeSets,
// these are the objects remaining in the UnitOfWork privateOwnedObjects map
if (hasPrivateOwnedObjects()) {
Map visitedObjects = new IdentityHashMap();
for (Set privateOwnedObjects : getPrivateOwnedObjects().values()) {
for (Object objectToRemove : privateOwnedObjects) {
performRemovePrivateOwnedObjectFromChangeSet(objectToRemove, visitedObjects);
}
}
this.privateOwnedObjects.clear();
}
// Finally notify listeners that the change set is fully calculated.
if (this.eventManager != null) {
this.eventManager.postCalculateUnitOfWorkChangeSet(changeSet);
}
return changeSet;
}
/**
 * INTERNAL:
 * Check whether the receiver is still unused, i.e. no objects have been
 * registered or deleted yet.
 *
 * @return true if the read-only set can still be changed, false once the
 *         unit of work has been used.
 */
protected boolean canChangeReadOnlySet() {
    // Once any clone has been registered or any object deleted, the set is frozen.
    return !(hasCloneMapping() || hasDeletedObjects());
}
/**
 * INTERNAL:
 * Determine whether the object is an existing object that has simply not
 * been registered, as opposed to a new object that has never been persisted.
 *
 * @param object the object whose existence is to be checked
 * @return true if the object exists in the database, false otherwise
 */
public boolean checkForUnregisteredExistingObject(Object object) {
    ClassDescriptor descriptor = getDescriptor(object.getClass());
    Object primaryKey = descriptor.getObjectBuilder().extractPrimaryKeyFromObject(object, this, true);
    // Without a primary key the object cannot possibly exist.
    if (primaryKey == null) {
        return false;
    }
    // Clone the prototype query so the shared instance is never mutated.
    DoesExistQuery doesExist = (DoesExistQuery)descriptor.getQueryManager().getDoesExistQuery().clone();
    doesExist.setObject(object);
    doesExist.setPrimaryKey(primaryKey);
    doesExist.setDescriptor(descriptor);
    doesExist.setIsExecutionClone(true);
    return ((Boolean)executeQuery(doesExist)).booleanValue();
}
/**
 * INTERNAL: Register the object and return the clone if it is existing,
 * otherwise return null if it is new. The unit of work determines existence
 * during registration, not during the commit.
 *
 * @param object the object whose existence is to be checked and registered
 * @return the working copy if the object exists, null if it is new
 */
public Object checkExistence(Object object) {
    ClassDescriptor descriptor = getDescriptor(object.getClass());
    Object primaryKey = descriptor.getObjectBuilder().extractPrimaryKeyFromObject(object, this, true);
    // PERF: an object with a null primary key cannot exist.
    if (primaryKey == null) {
        return null;
    }
    DoesExistQuery existQuery = descriptor.getQueryManager().getDoesExistQuery();
    // PERF: checkEarlyReturn usually answers existence without running the query.
    Boolean exists = (Boolean)existQuery.checkEarlyReturn(object, primaryKey, this, null);
    if (exists == null) {
        // Undetermined: the database query must be executed.
        DoesExistQuery executableQuery = (DoesExistQuery)existQuery.clone();
        executableQuery.setObject(object);
        executableQuery.setPrimaryKey(primaryKey);
        executableQuery.setDescriptor(descriptor);
        executableQuery.setIsExecutionClone(true);
        exists = (Boolean)executeQuery(executableQuery);
    }
    if (!exists) {
        return null;
    }
    // We know it exists; now find or register its working copy.
    Object cachedObject = getIdentityMapAccessorInstance().getFromIdentityMap(primaryKey, object.getClass(), descriptor);
    if (cachedObject != null) {
        // Ensure that the registered object is the one from the parent cache.
        if (shouldPerformFullValidation()
                && (cachedObject != object)
                && (this.parent.getIdentityMapAccessorInstance().getFromIdentityMap(primaryKey, object.getClass(), descriptor) != object)) {
            throw ValidationException.wrongObjectRegistered(object, cachedObject);
        }
        // Already cloned, unless it was deleted in this unit of work.
        if (!this.isObjectDeleted(cachedObject)) {
            return cachedObject;
        }
    }
    // The object is not in the session cache, so a fresh cache-key is used as
    // there is no original to use for locking. Its read time must be set to
    // avoid it being invalidated.
    CacheKey cacheKey = new CacheKey(primaryKey);
    cacheKey.setReadTime(System.currentTimeMillis());
    cacheKey.setIsolated(true); // the cache has no version, so build from the supplied object
    return cloneAndRegisterObject(object, cacheKey, descriptor);
}
/**
 * INTERNAL:
 * Return the working copy of the object if it is already registered in this
 * unit of work, otherwise null.
 *
 * @param object the object (original or working copy) to look up
 * @param descriptor the descriptor for the object's class
 * @return the registered working copy, or null if unregistered
 */
public Object checkIfAlreadyRegistered(Object object, ClassDescriptor descriptor) {
    // Read-only classes are never registered.
    if (isClassReadOnly(object.getClass(), descriptor)) {
        return null;
    }
    // A working copy being registered again yields the same working copy.
    if (getCloneMapping().get(object) != null) {
        return object;
    }
    // If the object is in the new-objects cache, the domain object is being
    // re-registered and must map to its existing working clone. This check
    // holds only for newly registered objects.
    // PERF: avoid lazy initialization when there are no new objects.
    if (hasNewObjects()) {
        Object clone = getNewObjectsOriginalToClone().get(object);
        if (clone != null) {
            return clone;
        }
    }
    // bug # 3228185
    // A nested unit of work may see a new object from its parent unit of work;
    // check the parent's new-object list to see if it was registered locally.
    if (this.isNestedUnitOfWork && hasNewObjectsInParentOriginalToClone()) {
        Object clone = getNewObjectsInParentOriginalToClone().get(object);
        if (clone != null) {
            return clone;
        }
    }
    return null;
}
/**
 * INTERNAL:
 * Check if the object is invalid and *should* be refreshed.
 * This is used to ensure that no invalid objects are cloned.
 *
 * @return true if the object must be refreshed before cloning
 */
@Override
public boolean isConsideredInvalid(Object object, CacheKey cacheKey, ClassDescriptor descriptor) {
    // Nested units of work never consider objects invalid.
    if (isNestedUnitOfWork) {
        return false;
    }
    // Defer the invalidation decision to the parent session.
    return getParent().isConsideredInvalid(object, cacheKey, descriptor);
}
/**
 * ADVANCED:
 * Register the new object with the unit of work.
 * This will register the new object with cloning.
 * Normally the registerObject method should be used for all registration of new and existing objects.
 * This version of the register method can only be used for new objects.
 * This method should only be used if a new object is desired to be registered without an existence Check.
 *
 * @param original the new object to register
 * @param isShallowClone whether to shallow-copy attributes into the clone
 *        instead of fully populating them
 * @return the working copy (clone) registered for the new object
 * @see #registerObject(Object)
 */
protected Object cloneAndRegisterNewObject(Object original, boolean isShallowClone) {
ClassDescriptor descriptor = getDescriptor(original);
//Nested unit of work is not supported for attribute change tracking
if (this.isNestedUnitOfWork && (descriptor.getObjectChangePolicy() instanceof AttributeChangeTrackingPolicy)) {
throw ValidationException.nestedUOWNotSupportedForAttributeTracking();
}
ObjectBuilder builder = descriptor.getObjectBuilder();
// bug 2612602 create the working copy object.
Object clone = builder.instantiateWorkingCopyClone(original, this);
// Must put in the original to clone to resolve circular refs.
// NOTE: registration order is load-bearing — the maps must be populated
// before attributes are copied so cyclic references find the clone.
getNewObjectsOriginalToClone().put(original, clone);
getNewObjectsCloneToOriginal().put(clone, original);
// Must put in clone mapping.
getCloneMapping().put(clone, clone);
if (isShallowClone) {
builder.copyInto(original, clone, true);
} else {
builder.populateAttributesForClone(original, null, clone, null, this);
}
// Must reregister in both new objects.
registerNewObjectClone(clone, original, descriptor);
//Build backup clone for DeferredChangeDetectionPolicy or ObjectChangeTrackingPolicy,
//but not for AttributeChangeTrackingPolicy
Object backupClone = descriptor.getObjectChangePolicy().buildBackupClone(clone, builder, this);
getCloneMapping().put(clone, backupClone);// The backup clone must be updated.
// Fire any events that were deferred while the clone was being built.
executeDeferredEvents();
return clone;
}