|
Well, I never actually use true "auto-increment" fields because they can be problematic for a variety of reasons. Instead I write my own routines to assign a unique auto-increment style key.
Using a GUID certainly guarantees a unique key. My big question would be how it affects performance. I don't know - you could do some tests maybe. But my uneducated guess would be that if the data tables get pretty large, indexing on a 128 bit value could make things start to really grind when working with a large data set.
Robert
|
|
|
|
|
It works like a charm!
We have a database with about 100 tables, all of which have GUIDs as their primary key.
In order to improve performance, we use a special form of GUID we call "Smart GUID" internally. We found this information somewhere on the web [^]. Basically, the GUID is generated based on the date/time of the client machine. It helps sorting and indexing things because it groups rows created around the same time together. It improves indexing drastically.
Anyway, here's the code we came up with.
<br />
<br />
using Microsoft.VisualBasic;<br />
using System;<br />
using System.Collections;<br />
using System.Data;<br />
using System.Diagnostics;<br />
namespace AdonisTech.Adonis2005.Core.Business {

    /// <summary>
    /// Wraps a "COMB" GUID: a GUID whose last six bytes encode the creation
    /// date/time, so values generated around the same time sort close together
    /// and cluster well in database indexes (Jimmy Nilsson's COMB technique).
    /// </summary>
    [ Serializable() ]
    public sealed class SmartGuid {

        // The wrapped GUID value; all operations delegate to it.
        private System.Guid mGuid;

        /// <summary>Creates a new time-ordered (COMB) GUID.</summary>
        public SmartGuid() {
            mGuid = NewComb();
        }

        /// <summary>Wraps the 16-byte GUID representation in <paramref name="b"/>.</summary>
        public SmartGuid( byte[] b ) {
            mGuid = new Guid( b );
        }

        public SmartGuid( UInt32 a, UInt16 b, UInt16 c, byte d, byte e, byte f, byte g, byte h, byte i, byte j, byte k ) {
            mGuid = new Guid( a, b, c, d, e, f, g, h, i, j, k );
        }

        /// <summary>Parses the GUID from its string representation.</summary>
        public SmartGuid( string g ) {
            mGuid = new Guid( g );
        }

        public SmartGuid( int a, short b, short c, byte[] d ) {
            mGuid = new Guid( a, b, c, d );
        }

        public SmartGuid( int a, short b, short c, byte d, byte e, byte f, byte g, byte h, byte i, byte j, byte k ) {
            mGuid = new Guid( a, b, c, d, e, f, g, h, i, j, k );
        }

        public override int GetHashCode() {
            return mGuid.GetHashCode();
        }

        /// <summary>
        /// True when <paramref name="o"/> is a SmartGuid or a System.Guid with
        /// the same underlying value.
        /// BUG FIX: the original delegated straight to Guid.Equals(o), which
        /// always returned false for SmartGuid arguments even though equal
        /// instances shared a hash code.
        /// </summary>
        public override bool Equals( object o ) {
            SmartGuid other = o as SmartGuid;
            if ( other != null ) {
                return mGuid.Equals( other.mGuid );
            }
            return mGuid.Equals( o );
        }

        // 'new' makes the intentional hiding of Object.Equals(object, object)
        // explicit and silences compiler warning CS0108.
        public new static bool Equals( object objA, object objB ) {
            return System.Guid.Equals( objA, objB );
        }

        /// <summary>
        /// Compares the underlying GUID. Accepts either a SmartGuid or a
        /// System.Guid; Guid.CompareTo would throw an ArgumentException for a
        /// SmartGuid argument, so SmartGuids are unwrapped first.
        /// </summary>
        public int CompareTo( object value ) {
            SmartGuid other = value as SmartGuid;
            if ( other != null ) {
                return mGuid.CompareTo( other.mGuid );
            }
            return mGuid.CompareTo( value );
        }

        public override string ToString() {
            return mGuid.ToString();
        }

        public string ToString( string format ) {
            return mGuid.ToString( format );
        }

        public string ToString( string format, System.IFormatProvider provider ) {
            return mGuid.ToString( format, provider );
        }

        /// <summary>Generates a new time-ordered (COMB) GUID.</summary>
        public static System.Guid NewGuid() {
            return NewComb();
        }

        /// <summary>
        /// Builds a COMB GUID: starts from a random GUID, then overwrites the
        /// last six bytes of its byte-array form with days-since-1900 (2 bytes)
        /// and time-of-day in 1/300-second units (4 bytes), stored big-endian
        /// so byte-wise sort order matches chronological order.
        /// NOTE(review): uses DateTime.Now (local time), so values generated
        /// across time zones or DST changes are only approximately ordered.
        /// </summary>
        private static Guid NewComb() {
            byte[] guidArray = System.Guid.NewGuid().ToByteArray();
            DateTime baseDate = new DateTime( 1900, 1, 1 );
            DateTime now = DateTime.Now;

            // Days since the 1900-01-01 base date, and time elapsed since midnight.
            TimeSpan days = new TimeSpan( now.Ticks - baseDate.Ticks );
            TimeSpan msecs = new TimeSpan( now.Ticks - ( new DateTime( now.Year, now.Month, now.Day ).Ticks ) );

            // 1/300-second resolution (hence / 3.333333) — the same granularity
            // as the SQL Server datetime type this layout mirrors.
            byte[] daysArray = BitConverter.GetBytes( days.Days );
            byte[] msecsArray = BitConverter.GetBytes( System.Convert.ToInt64( msecs.TotalMilliseconds / 3.333333 ) );

            // BitConverter emits little-endian on common platforms; reverse so
            // the most significant byte sorts first.
            Array.Reverse( daysArray );
            Array.Reverse( msecsArray );

            // Low 2 bytes of the day count -> guidArray[10..11];
            // low 4 bytes of the time-of-day -> guidArray[12..15].
            Array.Copy( daysArray, daysArray.Length - 2, guidArray, guidArray.Length - 6, 2 );
            Array.Copy( msecsArray, msecsArray.Length - 4, guidArray, guidArray.Length - 4, 4 );

            return new System.Guid( guidArray );
        }

        /// <summary>Extracts the creation timestamp embedded in this COMB GUID.</summary>
        public DateTime GetDateTime() {
            return GetCombDateTime( mGuid );
        }

        /// <summary>Extracts the creation timestamp embedded in a COMB GUID.</summary>
        public static DateTime GetDateTime( Guid o ) {
            return GetCombDateTime( o );
        }

        /// <summary>
        /// Reverses NewComb: reads the day count and the 1/300-second
        /// time-of-day value back out of the GUID's last six bytes.
        /// </summary>
        private static DateTime GetCombDateTime( Guid guid ) {
            DateTime baseDate = new DateTime( 1900, 1, 1 );
            // BUG FIX: these buffers must be exactly 4 bytes. The original used
            // byte[5]; after Array.Reverse the payload landed one byte off, so
            // BitConverter.ToInt32 decoded the day and time values scaled by
            // 256 and the round trip returned a wildly wrong date.
            byte[] daysArray = new byte[ 4 ];
            byte[] msecsArray = new byte[ 4 ];
            byte[] guidArray = guid.ToByteArray();

            Array.Copy( guidArray, guidArray.Length - 6, daysArray, 2, 2 );
            Array.Copy( guidArray, guidArray.Length - 4, msecsArray, 0, 4 );

            // Undo the big-endian encoding applied in NewComb.
            Array.Reverse( daysArray );
            Array.Reverse( msecsArray );

            int days = BitConverter.ToInt32( daysArray, 0 );
            int msecs = BitConverter.ToInt32( msecsArray, 0 );

            DateTime myDate = baseDate.AddDays( days );
            myDate = myDate.AddMilliseconds( msecs * 3.333333 );

            return myDate;
        }

    }

}
|
|
|
|
|
C a r l wrote:
Anyway, here's the code we came up with.
This looks great! Thanks for the info and the code example. We'll definitely make use of this!
Marc
MyXaml
Advanced Unit Testing
YAPO
|
|
|
|
|
In my experience generating the keys on the client to send to the server makes things much easier. The databases I've used it on have only had tables on the order of a million records per table and smaller, database size only several GB, so I can't say much about performance. The performance penalties for missing indexes usually dwarf those from data size anyway, so I wouldn't be too concerned.
They are especially convenient for things like web applications where you want to eliminate double-post issues and the like. Since you have a unique transaction ID, you can easily check that you aren't duplicating operations.
I can imagine the sinking feeling one would have after ordering my book,
only to find a laughably ridiculous theory with demented logic once the book arrives - Mark McCutcheon
|
|
|
|
|
Yes. I've done it both ways, and I definitely prefer using unique IDs to sequences/identity columns.
The reasons have been pretty well covered here. There are some minor drawbacks, such as making visual inspection of the data a little more difficult (e.g. you're trying to glance at a small sample of data to get an idea of how related records hang together), and it takes a little getting used to in that respect, but the benefits outweigh the aggravations for me.
The value of using a guaranteed "unique" id was underscored for me recently while I was working on a project using Oracle, which has no Guid type intrinsically. The designers had established sequences and insert triggers to keep the sequences incrementing; however, after some 50,000 rows of data or so, one of the sequences got dropped and recreated with an arbitrary seed value lower than the highest previous sequence value. That was bad enough; what made it worse was that the code responsible for inserting rows into the affected table did not differentiate between inserts and updates. The method was passed a PK value (integer), which in the case of new records was the next value in the sequence, and for existing records was their current PK value. The logic was similar to:
1. Does that sequence value exist in the database?
2. If step 1 is False, insert a new record (correct behavior at all times)
3. If step 1 is True, update the record with the provided PK value with all of the data provided (this was a killer, since the renumbered sequence was now sometimes providing "used" PK values for new records, overwriting entire records inadvertently - it was even harder to locate the problem since deletions made "holes" in the tables PK range, so sometimes new records with out-of-sequence PKs would insert just fine).
Guids are goooood.
Hope this helps.
The most exciting phrase to hear in science, the one that heralds the most discoveries, is not 'Eureka!' ('I found it!') but 'That's funny...’
|
|
|
|
|
In MS SQL Server, PKs are clustered indexes, and GUID will be all over the map. This might affect insert performance greatly. I would probably use a simple incrementing integer PK and have the GUID as a unique constraint. When merging, it doesn't matter that the PK changes, because you can still query by GUID.
That said, I used GUID as PKs for a small project once and it worked OK.
|
|
|
|
|
For my SQL Server work, I always use a GUID uniqueidentifier for my primary keys, which are usually created as the default value for the column with newid().
I once used client side generated GUID's for primary keys on an Oracle DB system, as Oracle didn't support guid's as unique identifiers. As the system was for a large call centre and each unique incoming telephone call was given a unique guid in the database so that it could be linked back to a booking record in their reservation system.
My call tracking software generated a guid for each call and wrote a record to the tracking DB using it as the primary key; it would then attach the guid to the call, and the booking system would read the guid and store it in the Oracle DB. The records in the two different tables were then PK/FK linked.
Michael
CP Blog [^] Development Blog [^]
|
|
|
|
|
I used them between a central SQL Server and Remote Clients operating with MS Access. They solved the problem quite well.
Things to think about
=====================
(1) The key is four times larger; therefore, joins on large tables are slower, and fewer keys fit on each 8k page.
(2) They are a pain when dealing with some DB maintenance issues. It is difficult to hand fix a row if it is based on a GUID.
(3) Some aggregate commands don't work with Guids (i.e. MIN, MAX).
(4) You will lose insert order. I know you aren't supposed to rely on an identity column's order but, I end up using it anyway when debugging some runtime issues. Just add a timestamp to solve this issue.
|
|
|
|
|
Marc,
Take a look at my reply to rwestgraham. I think it will be of interest for you.
Take it easy!
|
|
|
|
|
hi,
I already have the oracle installed in my pc my friend did it.I dont know the server name of this server.I am trying to access the tables in oracle but in order to know that I need to know the server name.Can anyone help me in finding the server name of the oracle already installed.
Thanking you in advance
Satish
|
|
|
|
|
I don't know anything about Oracle, so this could be completely wrong, but I would have thought the server name would be the same as the machine name. I'm guessing that the client software would attempt to find the Oracle database on a network and to do that it would need to know the name (or IP) of the machine that Oracle is running on and that machine name is also the server name. If Oracle supports multiple server processes on one machine each process may need a separate named instance.
Anyway, Just a thought you could try out. (Maybe "localhost", or 127.0.0.1 will work)
My: Blog | Photos | Next SQL Presentation
WDevs.com - Open Source Code Hosting, Blogs, FTP, Mail and More
|
|
|
|
|
I haven't used oracle since oracle 9, so this might be out of date. You need to find the tnsnames.ora file installed on your hard drive in the oracle install directory. This will have a list of all the server names that are registered and you have access to. Open this file in notepad to view all the registered server names.
I can imagine the sinking feeling one would have after ordering my book,
only to find a laughably ridiculous theory with demented logic once the book arrives - Mark McCutcheon
|
|
|
|
|
When i call a stored procedure to create a table named DETHI
The table DETHI create is owned by ASPNET account. But when i
executed a select statement(SELECT * FROM DETHI), SQL Server reported 'Invalid object named DETHI'
I can't solve this problem
Please help, thanks in advance
|
|
|
|
|
It may have created your table using the name aspnet.DETHI meaning the owner of the table is aspnet user.
Try specifying CREATE TABLE dbo.DETHI in your stored procedure or access the table using SELECT * FROM aspnet.DETHI.
Hope it helps,
Edbert
|
|
|
|
|
What kind of Errors the SELECT statement can raise??
In what cases?
How to avoid them?
How to handle them?
------------------------------------
To study, study and only to study
|
|
|
|
|
_J_ wrote:
What kind of Errors the SELECT statement can raise??
That's kind of like asking "What kind of accidents can my car be involved in?"
_J_ wrote:
In what cases?
When something goes wrong or not specified correctly.
_J_ wrote:
How to avoid them?
Get your permissions sorted out. Don't refer to things that don't exist. Get the syntax right.
_J_ wrote:
How to handle them?
DECLARE @error_number
SELECT .... -- Your select statement goes here
SET @error_number = @@ERROR -- @@ERROR is very short lived, grab it while you can.
IF @error_number = ... -- You can find the error numbers and the errors they relate to in the sysmessages table
If you can be more specific then perhaps I can give you a more specific response. There are somewhere in the region of 3900 error / warning messages in SQL Server.
My: Blog | Photos | Next SQL Presentation
WDevs.com - Open Source Code Hosting, Blogs, FTP, Mail and More
|
|
|
|
|
Ok. If i perform INSERT INTO ..... the most common error is cannot insert duplicate.
However, if I perform a SELECT and all parameters in my SELECT statement are OK — i.e. there are no syntax errors and all the columns and tables I specified exist — is there still some kind of error that can occur?
------------------------------------
To study, study and only to study
|
|
|
|
|
|
So as i understand:
SELECT ..., ...., .... ... FROM ....
.....
....
SELECT @myERROR = @@ERROR
IF @myERROR != 0
here I handle the error, maybe with a call to RAISERROR
------------------------------------
To study, study and only to study
|
|
|
|
|
|
I know that errors with severity less than 10 are informational messages, so in C# I can't catch them in a try/catch/finally block; because of that I will need to raise my error with severity > 10, and then I can catch that error in my application.
------------------------------------
To study, study and only to study
|
|
|
|
|
|
ok. Thank u.
------------------------------------
To study, study and only to study
|
|
|
|
|
Most of the SQL errors are non-recoverable unless you are writing code to design SQL statements. If you commonly get SQL errors then it is a sign that your application isn't using the SQL server properly.
For example if you are adding a username/password to a registration database and the username must be unique, you should send a statement that returns different results based on a transacted exist/insert statement rather then trap the exception.
Common occurrences shouldn't typically appear in error handling or catch blocks.
I can imagine the sinking feeling one would have after ordering my book,
only to find a laughably ridiculous theory with demented logic once the book arrives - Mark McCutcheon
|
|
|
|
|
I have not big DB (12 tables)
Suppose application is requested for some data about specified clientID
by means of ADO.NET i got all the relevant data about this clientID from several tables to the client, client perform tasks on this data change it maybe add something new or delete somtething (there is posibility that data will not changed at all), after this client click to SAVE button and return data to DB, here start the problem, i can delete all relevant data about specified clientID in all table and after this insert data tables (insert data that recieved from the application) or i can check what changed and to remove or update or insert data.
ANY IDEAS, and maybe how to perform my problem.
I'm using:
Windows Xp, .NET Framework, Sql-Server 2000.
------------------------------------
To study, study and only to study
|
|
|
|
|